1

mm: introduce PageUnaccepted() page type

The new page type allows physical memory scanners to detect unaccepted
memory and handle it accordingly.

The page type is serialized with zone lock.

Link: https://lkml.kernel.org/r/20240809114854.3745464-5-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Kirill A. Shutemov 2024-08-09 14:48:50 +03:00 committed by Andrew Morton
parent 4be9064baa
commit 310183de7b
2 changed files with 10 additions and 0 deletions

View File

@@ -939,6 +939,7 @@ enum pagetype {
 	PG_hugetlb	= 0x04000000,
 	PG_slab		= 0x02000000,
 	PG_zsmalloc	= 0x01000000,
+	PG_unaccepted	= 0x00800000,
 
 	PAGE_TYPE_BASE	= 0x80000000,
@@ -1072,6 +1073,13 @@ FOLIO_TEST_FLAG_FALSE(hugetlb)
 PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
 
+/*
+ * Mark pages that has to be accepted before touched for the first time.
+ *
+ * Serialized with zone lock.
+ */
+PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
+
 /**
  * PageHuge - Determine if the page belongs to hugetlbfs
  * @page: The page to test.

View File

@@ -6963,6 +6963,7 @@ static bool try_to_accept_memory_one(struct zone *zone)
 	account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
 	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+	__ClearPageUnaccepted(page);
 	spin_unlock_irqrestore(&zone->lock, flags);
 
 	accept_page(page, MAX_PAGE_ORDER);
@@ -7021,6 +7022,7 @@ static bool __free_unaccepted(struct page *page)
 	list_add_tail(&page->lru, &zone->unaccepted_pages);
 	account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
 	__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
+	__SetPageUnaccepted(page);
 	spin_unlock_irqrestore(&zone->lock, flags);
 
 	if (first)