diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst
index c18d94fa6470..baa1c355741d 100644
--- a/Documentation/admin-guide/kdump/vmcoreinfo.rst
+++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst
@@ -325,8 +325,8 @@ NR_FREE_PAGES
 On linux-2.6.21 or later, the number of free pages is in
 vm_stat[NR_FREE_PAGES]. Used to get the number of free pages.
 
-PG_lru|PG_private|PG_swapcache|PG_swapbacked|PG_slab|PG_hwpoision|PG_head_mask
-------------------------------------------------------------------------------
+PG_lru|PG_private|PG_swapcache|PG_swapbacked|PG_slab|PG_hwpoision|PG_head_mask|PG_hugetlb
+-----------------------------------------------------------------------------------------
 
 Page attributes. These flags are used to filter various unnecessary for
 dumping pages.
@@ -338,12 +338,6 @@ More page attributes. These flags are used to filter various unnecessary for
 dumping pages.
 
 
-HUGETLB_PAGE_DTOR
------------------
-
-The HUGETLB_PAGE_DTOR flag denotes hugetlbfs pages. Makedumpfile
-excludes these pages.
-
 x86_64
 ======
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0955b6b13fd0..e241f5ee4dc4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1240,11 +1240,7 @@ void folio_copy(struct folio *dst, struct folio *src);
 unsigned long nr_free_buffer_pages(void);
 
 enum compound_dtor_id {
-	NULL_COMPOUND_DTOR,
 	COMPOUND_PAGE_DTOR,
-#ifdef CONFIG_HUGETLB_PAGE
-	HUGETLB_PAGE_DTOR,
-#endif
 	TRANSHUGE_PAGE_DTOR,
 	NR_COMPOUND_DTORS,
 };
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9218028caf33..e9e7cc45352d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -171,15 +171,6 @@ enum pageflags {
 	/* Remapped by swiotlb-xen. */
 	PG_xen_remapped = PG_owner_priv_1,
 
-#ifdef CONFIG_MEMORY_FAILURE
-	/*
-	 * Compound pages. Stored in first tail page's flags.
-	 * Indicates that at least one subpage is hwpoisoned in the
-	 * THP.
-	 */
-	PG_has_hwpoisoned = PG_error,
-#endif
-
 	/* non-lru isolated movable page */
 	PG_isolated = PG_reclaim,
 
@@ -190,6 +181,15 @@ enum pageflags {
 	/* For self-hosted memmap pages */
 	PG_vmemmap_self_hosted = PG_owner_priv_1,
 #endif
+
+	/*
+	 * Flags only valid for compound pages.  Stored in first tail page's
+	 * flags word.
+	 */
+
+	/* At least one page in this folio has the hwpoison flag set */
+	PG_has_hwpoisoned = PG_error,
+	PG_hugetlb = PG_active,
 };
 
 #define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)
@@ -812,7 +812,23 @@ static inline void ClearPageCompound(struct page *page)
 
 #ifdef CONFIG_HUGETLB_PAGE
 int PageHuge(struct page *page);
-bool folio_test_hugetlb(struct folio *folio);
+SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
+CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
+
+/**
+ * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
+ * @folio: The folio to test.
+ *
+ * Context: Any context.  Caller should have a reference on the folio to
+ * prevent it from being turned into a tail page.
+ * Return: True for hugetlbfs folios, false for anon folios or folios
+ * belonging to other filesystems.
+ */
+static inline bool folio_test_hugetlb(struct folio *folio)
+{
+	return folio_test_large(folio) &&
+		test_bit(PG_hugetlb, folio_flags(folio, 1));
+}
 #else
 TESTPAGEFLAG_FALSE(Huge, hugetlb)
 #endif
@@ -1056,6 +1072,13 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
 #define PAGE_FLAGS_CHECK_AT_PREP \
 	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
 
+/*
+ * Flags stored in the second page of a compound page.  They may overlap
+ * the CHECK_AT_FREE flags above, so need to be cleared.
+ */
+#define PAGE_FLAGS_SECOND						\
+	(1UL << PG_has_hwpoisoned	| 1UL << PG_hugetlb)
+
 #define PAGE_FLAGS_PRIVATE				\
 	(1UL << PG_private | 1UL << PG_private_2)
 /**
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 90ce1dfd591c..dd5f87047d06 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -490,7 +490,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 #define PAGE_BUDDY_MAPCOUNT_VALUE	(~PG_buddy)
 	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
 #ifdef CONFIG_HUGETLB_PAGE
-	VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
+	VMCOREINFO_NUMBER(PG_hugetlb);
 #define PAGE_OFFLINE_MAPCOUNT_VALUE	(~PG_offline)
 	VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
 #endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6a3c80026ab3..a82c3104337e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1585,25 +1585,7 @@ static inline void __clear_hugetlb_destructor(struct hstate *h,
 {
 	lockdep_assert_held(&hugetlb_lock);
 
-	/*
-	 * Very subtle
-	 *
-	 * For non-gigantic pages set the destructor to the normal compound
-	 * page dtor.  This is needed in case someone takes an additional
-	 * temporary ref to the page, and freeing is delayed until they drop
-	 * their reference.
-	 *
-	 * For gigantic pages set the destructor to the null dtor.  This
-	 * destructor will never be called.  Before freeing the gigantic
-	 * page destroy_compound_gigantic_folio will turn the folio into a
-	 * simple group of pages.  After this the destructor does not
-	 * apply.
-	 *
-	 */
-	if (hstate_is_gigantic(h))
-		folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR);
-	else
-		folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
+	folio_clear_hugetlb(folio);
 }
 
 /*
@@ -1690,7 +1672,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
 		h->surplus_huge_pages_node[nid]++;
 	}
 
-	folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
+	folio_set_hugetlb(folio);
 	folio_change_private(folio, NULL);
 	/*
 	 * We have to set hugetlb_vmemmap_optimized again as above
@@ -1814,9 +1796,8 @@ static void free_hpage_workfn(struct work_struct *work)
 		/*
 		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
 		 * folio_hstate() is going to trigger because a previous call to
-		 * remove_hugetlb_folio() will call folio_set_compound_dtor
-		 * (folio, NULL_COMPOUND_DTOR), so do not use folio_hstate()
-		 * directly.
+		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
+		 * not use folio_hstate() directly.
 		 */
 		h = size_to_hstate(page_size(page));
 
@@ -1955,7 +1936,7 @@ static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
 	hugetlb_vmemmap_optimize(h, &folio->page);
 	INIT_LIST_HEAD(&folio->lru);
-	folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
+	folio_set_hugetlb(folio);
 	hugetlb_set_folio_subpool(folio, NULL);
 	set_hugetlb_cgroup(folio, NULL);
 	set_hugetlb_cgroup_rsvd(folio, NULL);
@@ -2070,28 +2051,10 @@ int PageHuge(struct page *page)
 	if (!PageCompound(page))
 		return 0;
 	folio = page_folio(page);
-	return folio->_folio_dtor == HUGETLB_PAGE_DTOR;
+	return folio_test_hugetlb(folio);
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
-/**
- * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
- * @folio: The folio to test.
- *
- * Context: Any context. Caller should have a reference on the folio to
- * prevent it from being turned into a tail page.
- * Return: True for hugetlbfs folios, false for anon folios or folios
- * belonging to other filesystems.
- */
-bool folio_test_hugetlb(struct folio *folio)
-{
-	if (!folio_test_large(folio))
-		return false;
-
-	return folio->_folio_dtor == HUGETLB_PAGE_DTOR;
-}
-EXPORT_SYMBOL_GPL(folio_test_hugetlb);
-
 /*
  * Find and lock address space (mapping) in write mode.
  *
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 31fec31be31e..d96dc6a3077a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1112,7 +1112,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
 
 		if (compound)
-			ClearPageHasHWPoisoned(page);
+			page[1].flags &= ~PAGE_FLAGS_SECOND;
 		for (i = 1; i < (1 << order); i++) {
 			if (compound)
 				bad += free_tail_page_prepare(page, page + i);
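A note on the dump-tool side of the change: the removed documentation said that makedumpfile excludes hugetlbfs pages, which it previously identified via the exported HUGETLB_PAGE_DTOR value. After this patch the kernel instead exports NUMBER(PG_hugetlb) in the vmcoreinfo note, and the flag lives in the first tail page's flags word (PF_SECOND), mirroring the new in-kernel folio_test_hugetlb(). The user-space sketch below only illustrates the shape of that check; it is not makedumpfile code, and the struct, the helper name and the bit positions in main() are invented for the example (a real tool parses the bit numbers out of the vmcoreinfo note).

/*
 * Illustrative sketch only.  head_flags is page->flags of the candidate
 * head page, tail1_flags is page->flags of the page immediately after it
 * (the first tail page), as a dump filter would read them from the memmap.
 */
#include <stdbool.h>
#include <stdio.h>

struct page_flag_bits {
	unsigned long pg_head;		/* from NUMBER(PG_head) in vmcoreinfo */
	unsigned long pg_hugetlb;	/* from NUMBER(PG_hugetlb), new with this patch */
};

/*
 * Mirrors folio_test_hugetlb(): the page must be a compound head, and
 * PG_hugetlb is kept in the first tail page's flags word.
 */
static bool dump_page_is_hugetlb(const struct page_flag_bits *b,
				 unsigned long head_flags,
				 unsigned long tail1_flags)
{
	if (!(head_flags & (1UL << b->pg_head)))
		return false;	/* not a compound head page at all */
	return tail1_flags & (1UL << b->pg_hugetlb);
}

int main(void)
{
	/* Example bit positions only; not the real kernel values. */
	struct page_flag_bits b = { .pg_head = 16, .pg_hugetlb = 5 };
	unsigned long head_flags = 1UL << b.pg_head;
	unsigned long tail1_flags = 1UL << b.pg_hugetlb;

	printf("hugetlb folio? %s\n",
	       dump_page_is_hugetlb(&b, head_flags, tail1_flags) ? "yes" : "no");
	return 0;
}

Keeping the flag in page[1].flags is also why the free_pages_prepare() hunk clears the new PAGE_FLAGS_SECOND mask there: PG_hugetlb aliases PG_active and PG_has_hwpoisoned aliases PG_error, and, as the added comment in page-flags.h notes, these may overlap the CHECK_AT_FREE bits and so must be cleared before the tail pages go through the free-time checks.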