
fs/proc/task_mmu: account non-present entries as "maybe shared, but no idea how often"

We currently rely on mapcount information for pages referenced by
non-present entries to calculate the USS (shared vs.  private) and the
PSS.

However, relying on mapcounts for non-present entries doesn't make any
sense.  We have to treat such entries as "maybe shared, but no idea how
often", implying that they will *not* get accounted towards the USS, and
will get fully accounted to the PSS (no idea how often shared).

There is one exception: device exclusive entries essentially behave like
present entries (e.g., mapcount incremented).
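
To make the arithmetic concrete (an illustrative example with made-up
numbers, not part of the patch): a present 4 KiB page with mapcount 4
contributes 4 KiB / 4 = 1 KiB to each mapping process's PSS and nothing
to its USS, whereas a page with mapcount 1 contributes the full 4 KiB to
both.  With this change, a page referenced only by a non-present entry
is accounted as "maybe shared": the full 4 KiB goes to the PSS and
nothing to the USS.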

In smaps_pmd_entry(), use is_pfn_swap_entry() instead of
is_migration_entry(), which should not make a real difference but makes
the code look more similar to the PTE variant.

While at it, adjust the comments in smaps_account().

Link: https://lkml.kernel.org/r/20240607122357.115423-5-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Oscar Salvador <osalvador@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -442,7 +442,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
 		bool compound, bool young, bool dirty, bool locked,
-		bool migration)
+		bool present)
 {
 	struct folio *folio = page_folio(page);
 	int i, nr = compound ? compound_nr(page) : 1;
@@ -471,22 +471,27 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 	 * Then accumulate quantities that may depend on sharing, or that may
 	 * differ page-by-page.
 	 *
-	 * refcount == 1 guarantees the page is mapped exactly once.
-	 * If any subpage of the compound page mapped with PTE it would elevate
-	 * the refcount.
+	 * refcount == 1 for present entries guarantees that the folio is mapped
+	 * exactly once. For large folios this implies that exactly one
+	 * PTE/PMD/... maps (a part of) this folio.
 	 *
-	 * The page_mapcount() is called to get a snapshot of the mapcount.
-	 * Without holding the page lock this snapshot can be slightly wrong as
-	 * we cannot always read the mapcount atomically. It is not safe to
-	 * call page_mapcount() even with PTL held if the page is not mapped,
-	 * especially for migration entries. Treat regular migration entries
-	 * as mapcount == 1.
+	 * Treat all non-present entries (where relying on the mapcount and
+	 * refcount doesn't make sense) as "maybe shared, but not sure how
+	 * often". We treat device private entries as being fake-present.
+	 *
+	 * Note that it would not be safe to read the mapcount especially for
+	 * pages referenced by migration entries, even with the PTL held.
 	 */
-	if ((folio_ref_count(folio) == 1) || migration) {
+	if (folio_ref_count(folio) == 1 || !present) {
 		smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
-			dirty, locked, true);
+			dirty, locked, present);
 		return;
 	}
+	/*
+	 * The page_mapcount() is called to get a snapshot of the mapcount.
+	 * Without holding the folio lock this snapshot can be slightly wrong as
+	 * we cannot always read the mapcount atomically.
+	 */
 	for (i = 0; i < nr; i++, page++) {
 		int mapcount = page_mapcount(page);
 		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
@@ -531,13 +536,14 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
-	bool migration = false, young = false, dirty = false;
+	bool present = false, young = false, dirty = false;
 	pte_t ptent = ptep_get(pte);
 
 	if (pte_present(ptent)) {
 		page = vm_normal_page(vma, addr, ptent);
 		young = pte_young(ptent);
 		dirty = pte_dirty(ptent);
+		present = true;
 	} else if (is_swap_pte(ptent)) {
 		swp_entry_t swpent = pte_to_swp_entry(ptent);
 
@@ -555,8 +561,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 			}
 		} else if (is_pfn_swap_entry(swpent)) {
-			if (is_migration_entry(swpent))
-				migration = true;
+			if (is_device_private_entry(swpent))
+				present = true;
 			page = pfn_swap_entry_to_page(swpent);
 		}
 	} else {
@@ -567,7 +573,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, young, dirty, locked, migration);
+	smaps_account(mss, page, false, young, dirty, locked, present);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -578,18 +584,17 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
+	bool present = false;
 	struct folio *folio;
-	bool migration = false;
 
 	if (pmd_present(*pmd)) {
 		page = vm_normal_page_pmd(vma, addr, *pmd);
+		present = true;
 	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
-		if (is_migration_entry(entry)) {
-			migration = true;
+		if (is_pfn_swap_entry(entry))
 			page = pfn_swap_entry_to_page(entry);
-		}
 	}
 	if (IS_ERR_OR_NULL(page))
 		return;
@@ -604,7 +609,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 		mss->file_thp += HPAGE_PMD_SIZE;
 
 	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
-		      locked, migration);
+		      locked, present);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -735,17 +740,21 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 	struct vm_area_struct *vma = walk->vma;
 	pte_t ptent = huge_ptep_get(pte);
 	struct folio *folio = NULL;
+	bool present = false;
 
 	if (pte_present(ptent)) {
 		folio = page_folio(pte_page(ptent));
+		present = true;
 	} else if (is_swap_pte(ptent)) {
 		swp_entry_t swpent = pte_to_swp_entry(ptent);
 
 		if (is_pfn_swap_entry(swpent))
 			folio = pfn_swap_entry_folio(swpent);
 	}
 	if (folio) {
-		if (folio_likely_mapped_shared(folio) ||
+		/* We treat non-present entries as "maybe shared". */
+		if (!present || folio_likely_mapped_shared(folio) ||
 		    hugetlb_pmd_shared(pte))
 			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 		else