mm: huge_memory: add the missing folio_test_pmd_mappable() for THP split statistics
Now that mTHP folios can also be split or added to the deferred split list,
add a folio_test_pmd_mappable() check so that only PMD-mapped THPs are
counted, avoiding inflation of the PMD-mapped THP statistics.

[baolin.wang@linux.alibaba.com: check THP earlier in case folio is split, per Lance]
Link: https://lkml.kernel.org/r/b99f8cb14bc85fdb6ab43721d1331cb5ebed2581.1713771041.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/a5341defeef27c9ac7b85c97f030f93e4368bbc1.1711694852.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lance Yang <ioworker0@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 835c3a25aa
parent d2136d749d
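For context, below is a minimal standalone sketch (userspace C, not kernel code) of the gating this patch introduces: only folios whose order reaches the PMD order count toward the PMD-mapped THP statistics, so smaller mTHP folios no longer bump THP_SPLIT_PAGE or THP_DEFERRED_SPLIT_PAGE. The HPAGE_PMD_ORDER value, the reuse of the helper name, and the example orders are illustrative assumptions; the real helper compares folio_order() against the architecture-dependent PMD order.

/*
 * Userspace sketch of the check this patch relies on.  In the kernel,
 * folio_test_pmd_mappable() returns true only when the folio's order is
 * at least the PMD order, so smaller mTHP folios do not bump the
 * PMD-mapped THP counters.  HPAGE_PMD_ORDER below assumes 4K base pages
 * and a 2M PMD (e.g. x86-64); it differs on other configurations.
 */
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_ORDER 9	/* assumption: 2M PMD with 4K base pages */

/* Mirrors the kernel check: PMD-mappable means order >= PMD order. */
static bool folio_test_pmd_mappable(unsigned int folio_order)
{
	return folio_order >= HPAGE_PMD_ORDER;
}

int main(void)
{
	/* Example orders: a 64K mTHP (order 4) and a 2M THP (order 9). */
	unsigned int orders[] = { 4, 9 };

	for (unsigned int i = 0; i < 2; i++)
		printf("order %u: %s toward THP_SPLIT_PAGE\n", orders[i],
		       folio_test_pmd_mappable(orders[i]) ?
		       "counts" : "does not count");
	return 0;
}

Note that in split_huge_page_to_list_to_order() the result is captured in is_thp before the split is attempted, since the folio's order changes once it has been split; this is the "check THP earlier" adjustment mentioned in the changelog.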
@@ -2934,6 +2934,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
+	bool is_thp = folio_test_pmd_mappable(folio);
 	int extra_pins, ret;
 	pgoff_t end;
 	bool is_hzp;
@@ -3112,7 +3113,8 @@ out_unlock:
 		i_mmap_unlock_read(mapping);
 out:
 	xas_destroy(&xas);
-	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
+	if (is_thp)
+		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
 	return ret;
 }
 
@@ -3174,7 +3176,8 @@ void deferred_split_folio(struct folio *folio)
 
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (list_empty(&folio->_deferred_list)) {
-		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+		if (folio_test_pmd_mappable(folio))
+			count_vm_event(THP_DEFERRED_SPLIT_PAGE);
 		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
 		ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG