mm: remove migration for HugePage in isolate_single_pageblock()
The gigantic page size may be larger than the memory block size, so memory
offline always fails in this case after commit b2c9e2fbba ("mm: make
alloc_contig_range work at pageblock granularity"):

offline_pages
  start_isolate_page_range
    start_isolate_page_range(isolate_before=true)
      isolate [isolate_start, isolate_start + pageblock_nr_pages)
    start_isolate_page_range(isolate_before=false)
      isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock
        __alloc_contig_migrate_range
          isolate_migratepages_range
            isolate_migratepages_block
              isolate_or_dissolve_huge_page
                if (hstate_is_gigantic(h))
                  return -ENOMEM;

[ 15.815756] memory offlining [mem 0x3c0000000-0x3c7ffffff] failed due to failure to isolate range

Gigantic PageHuge is bigger than a pageblock, but since it is freed as
order-0 pages, its pageblocks after being freed will get to the right
free list. There is no need to have special handling code for them in
start_isolate_page_range(). For both alloc_contig_range() and memory
offline cases, the migration code after start_isolate_page_range() will
be able to migrate gigantic PageHuge when possible. Let's clean up
start_isolate_page_range() and fix the aforementioned memory offline
failure issue all together.

Link: https://lkml.kernel.org/r/20240820032630.1894770-1-wangkefeng.wang@huawei.com
Fixes: b2c9e2fbba ("mm: make alloc_contig_range work at pageblock granularity")
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 49029c4db3
commit cd5f3193b4
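To make the size mismatch concrete, the following standalone C sketch relates the failing range above to typical x86-64 defaults (2 MiB pageblocks, 128 MiB memory blocks, 1 GiB gigantic hugepages). These sizes are assumptions for illustration, not values taken from this commit.

#include <stdio.h>

int main(void)
{
        /* Assumed typical x86-64 defaults, not taken from this commit. */
        const unsigned long long pageblock_size = 2ULL << 20;   /* 2 MiB pageblock */
        const unsigned long long memblock_size  = 128ULL << 20; /* 128 MiB memory block */
        const unsigned long long gigantic_size  = 1ULL << 30;   /* 1 GiB gigantic hugepage */

        /* Range from the "memory offlining ... failed" message above. */
        const unsigned long long start = 0x3c0000000ULL;
        const unsigned long long end   = 0x3c7ffffffULL + 1;

        printf("offlined range: %llu MiB (one %llu MiB memory block)\n",
               (end - start) >> 20, memblock_size >> 20);
        printf("one gigantic page spans %llu pageblocks and %llu memory blocks\n",
               gigantic_size / pageblock_size, gigantic_size / memblock_size);
        return 0;
}

With these assumed values a single gigantic page covers 512 pageblocks across 8 memory blocks, so offlining any one of those 128 MiB blocks runs into the gigantic page and, before this patch, into the -ENOMEM path shown in the call chain.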
@@ -403,30 +403,8 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
                         unsigned long head_pfn = page_to_pfn(head);
                         unsigned long nr_pages = compound_nr(head);
 
-                        if (head_pfn + nr_pages <= boundary_pfn) {
-                                pfn = head_pfn + nr_pages;
-                                continue;
-                        }
-
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
-                        if (PageHuge(page)) {
-                                int page_mt = get_pageblock_migratetype(page);
-                                struct compact_control cc = {
-                                        .nr_migratepages = 0,
-                                        .order = -1,
-                                        .zone = page_zone(pfn_to_page(head_pfn)),
-                                        .mode = MIGRATE_SYNC,
-                                        .ignore_skip_hint = true,
-                                        .no_set_skip_hint = true,
-                                        .gfp_mask = gfp_flags,
-                                        .alloc_contig = true,
-                                };
-                                INIT_LIST_HEAD(&cc.migratepages);
-
-                                ret = __alloc_contig_migrate_range(&cc, head_pfn,
-                                                        head_pfn + nr_pages, page_mt);
-                                if (ret)
-                                        goto failed;
+                        if (head_pfn + nr_pages <= boundary_pfn ||
+                            PageHuge(page)) {
                                 pfn = head_pfn + nr_pages;
                                 continue;
                         }
@@ -440,7 +418,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
                          */
                         VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
                         VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);
-#endif
+
                         goto failed;
                 }
 
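For readability, here is a condensed, comment-annotated sketch of the resulting logic. It is assembled only from the lines visible in the hunks above (context lines between the hunks are elided) and is not a verbatim copy of the kernel source.

        unsigned long head_pfn = page_to_pfn(head);
        unsigned long nr_pages = compound_nr(head);

        /*
         * Skip the whole compound page when it ends before the isolation
         * boundary, and skip hugetlb pages unconditionally: free (gigantic)
         * hugetlb pages are released as order-0 pages, so their pageblocks
         * end up on the right free lists, and in-use ones are migrated later
         * by the callers of start_isolate_page_range().
         */
        if (head_pfn + nr_pages <= boundary_pfn || PageHuge(page)) {
                pfn = head_pfn + nr_pages;
                continue;
        }

        /* Any other movable page reaching this point is unexpected. */
        VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
        VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);

        goto failed;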