
mm: support page_mapcount() on page_has_type() pages

Return 0 for pages which can't be mapped.  This matches how page_mapped()
works.  It is more convenient for users to not have to filter out these
pages.

Link: https://lkml.kernel.org/r/20240321142448.1645400-5-willy@infradead.org
Fixes: 9c5ccf2db0 ("mm: remove HUGETLB_PAGE_DTOR")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit fd1a745ce0
parent 12bbaae763
Author:    Matthew Wilcox (Oracle), 2024-03-21 14:24:42 +00:00
Committer: Andrew Morton
3 changed files with 9 additions and 10 deletions
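
To illustrate the caller-side simplification, a hypothetical
before/after snippet modelled on the kpagecount_read() change below
(not part of the patch itself):

	long pcount;

	/* Before: callers had to filter out pages that overload _mapcount. */
	if (!page || PageSlab(page) || page_has_type(page))
		pcount = 0;
	else
		pcount = page_mapcount(page);

	/* After: page_mapcount() itself reports 0 for such pages. */
	pcount = page ? page_mapcount(page) : 0;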

--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -67,7 +67,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 		 */
 		ppage = pfn_to_online_page(pfn);
 
-		if (!ppage || PageSlab(ppage) || page_has_type(ppage))
+		if (!ppage)
 			pcount = 0;
 		else
 			pcount = page_mapcount(ppage);
@@ -124,11 +124,8 @@ u64 stable_page_flags(struct page *page)
 
 	/*
 	 * pseudo flags for the well known (anonymous) memory mapped pages
-	 *
-	 * Note that page->_mapcount is overloaded in SLAB, so the
-	 * simple test in page_mapped() is not enough.
 	 */
-	if (!PageSlab(page) && page_mapped(page))
+	if (page_mapped(page))
 		u |= 1 << KPF_MMAP;
 	if (PageAnon(page))
 		u |= 1 << KPF_ANON;
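
The user-visible interface touched here is /proc/kpagecount, which
exports one 64-bit mapcount per page frame. A minimal userspace reader
(an illustrative sketch, not from this patch; opening the file
typically requires root):

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		uint64_t pfn, count;
		FILE *f = fopen("/proc/kpagecount", "rb");

		if (argc < 2 || !f)
			return 1;
		pfn = strtoull(argv[1], NULL, 0);
		if (fseek(f, pfn * sizeof(count), SEEK_SET) ||
		    fread(&count, sizeof(count), 1, f) != 1)
			return 1;
		/* Typed pages (slab etc) now read back as 0 rather
		 * than an undefined value. */
		printf("pfn %llu: mapcount %llu\n",
		       (unsigned long long)pfn, (unsigned long long)count);
		return 0;
	}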

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1223,14 +1223,16 @@ static inline void page_mapcount_reset(struct page *page)
  * a large folio, it includes the number of times this page is mapped
  * as part of that folio.
  *
- * The result is undefined for pages which cannot be mapped into userspace.
- * For example SLAB or special types of pages. See function page_has_type().
- * They use this field in struct page differently.
+ * Will report 0 for pages which cannot be mapped into userspace, eg
+ * slab, page tables and similar.
  */
 static inline int page_mapcount(struct page *page)
 {
 	int mapcount = atomic_read(&page->_mapcount) + 1;
 
+	/* Handle page_has_type() pages */
+	if (mapcount < 0)
+		mapcount = 0;
 	if (unlikely(PageCompound(page)))
 		mapcount += folio_entire_mapcount(page_folio(page));
 
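
To make the new arithmetic concrete, a standalone userspace sketch;
the PG_buddy constant and the buddy-page encoding are mirrored from
include/linux/page-flags.h rather than taken from this hunk:

	#include <stdio.h>

	#define PG_buddy	0x00000080

	/* Simplified model of the new page_mapcount() logic. */
	static int mapcount_of(int _mapcount)
	{
		int mapcount = _mapcount + 1;

		/* Handle page_has_type() pages */
		if (mapcount < 0)
			mapcount = 0;
		return mapcount;
	}

	int main(void)
	{
		/* Freshly allocated, unmapped page: _mapcount == -1. */
		printf("%d\n", mapcount_of(-1));		/* 0 */
		/* Page mapped into three address spaces. */
		printf("%d\n", mapcount_of(2));			/* 3 */
		/* Buddy page: page_type starts at -1 and __SetPageBuddy
		 * clears the PG_buddy bit, leaving a large negative
		 * value, so the clamp reports 0. */
		printf("%d\n", mapcount_of(-1 & ~PG_buddy));	/* 0 */
		return 0;
	}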

--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -971,12 +971,12 @@ static inline bool is_page_hwpoison(struct page *page)
  * page_type may be used. Because it is initialised to -1, we invert the
  * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
  * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of page_mapcount() won't be
+ * low bits so that an underflow or overflow of _mapcount won't be
  * mistaken for a page type value.
  */
 
 #define PAGE_TYPE_BASE	0xf0000000
-/* Reserve 0x0000007f to catch underflows of page_mapcount */
+/* Reserve 0x0000007f to catch underflows of _mapcount */
 #define PAGE_MAPCOUNT_RESERVE	-128
 #define PG_buddy	0x00000080
 #define PG_offline	0x00000100
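
To make the inverted-sense encoding concrete, a standalone sketch;
page_type_has() mirrors the kernel's PageType() test, which lives
outside the lines shown in this hunk:

	#include <stdio.h>
	#include <stdbool.h>

	#define PAGE_TYPE_BASE		0xf0000000
	#define PG_buddy		0x00000080
	#define PG_offline		0x00000100

	/* A page "has" a type when the base bits are set and the
	 * per-type bit is *clear* (inverted sense). */
	static bool page_type_has(unsigned int page_type, unsigned int flag)
	{
		return (page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE;
	}

	int main(void)
	{
		unsigned int type = ~0u;	/* initialised to -1 */

		type &= ~PG_buddy;		/* __SetPageBuddy */
		printf("buddy:   %d\n", page_type_has(type, PG_buddy));   /* 1 */
		printf("offline: %d\n", page_type_has(type, PG_offline)); /* 0 */

		/* An underflow of _mapcount within the reserve
		 * (-2 .. -128, i.e. 0xfffffffe .. 0xffffff80) keeps
		 * bits 7..31 set, so no per-type bit is ever clear and
		 * the value can't be mistaken for a page type. */
		return 0;
	}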