mm/memory-failure: convert shake_page() to shake_folio()
Removes two calls to compound_head(). Move the prototype to internal.h;
we definitely don't want code outside mm using it.

Link: https://lkml.kernel.org/r/20240412193510.2356957-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit fed5348ee2
parent b87f978dc7
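The shape of the conversion, in isolation: a helper that used to take a (possibly tail) struct page and normalise it with compound_head() now takes a struct folio, and a thin wrapper is kept for callers that still only hold a page. The sketch below is illustrative, self-contained userspace C with stand-in types; struct page, struct folio and page_folio() here are simplified models, not the kernel's definitions.

#include <stdio.h>

/* Stand-in types: in the kernel, the folio is the head page of a compound page. */
struct folio { int is_hugetlb; };
struct page  { struct folio *folio; };

/* Stand-in for the kernel's page_folio(): any page -> its owning folio. */
static struct folio *page_folio(struct page *page)
{
	return page->folio;
}

/* New interface: callers pass the folio, so no compound_head() is needed here. */
static void shake_folio(struct folio *folio)
{
	if (folio->is_hugetlb)
		return;
	printf("drain per-CPU LRU caches\n");
}

/* Legacy wrapper for callers that still hold only a struct page. */
static void shake_page(struct page *page)
{
	shake_folio(page_folio(page));
}

int main(void)
{
	struct folio f = { .is_hugetlb = 0 };
	struct page p = { .folio = &f };

	shake_folio(&f);	/* converted caller */
	shake_page(&p);		/* unconverted caller goes through the wrapper */
	return 0;
}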
@@ -4033,7 +4033,6 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
 extern int memory_failure(unsigned long pfn, int flags);
 extern void memory_failure_queue_kick(int cpu);
 extern int unpoison_memory(unsigned long pfn);
-extern void shake_page(struct page *p);
 extern atomic_long_t num_poisoned_pages __read_mostly;
 extern int soft_offline_page(unsigned long pfn, int flags);
 #ifdef CONFIG_MEMORY_FAILURE
@@ -15,7 +15,7 @@ static int hwpoison_inject(void *data, u64 val)
 {
 	unsigned long pfn = val;
 	struct page *p;
-	struct page *hpage;
+	struct folio *folio;
 	int err;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -25,16 +25,17 @@ static int hwpoison_inject(void *data, u64 val)
 		return -ENXIO;
 
 	p = pfn_to_page(pfn);
-	hpage = compound_head(p);
+	folio = page_folio(p);
 
 	if (!hwpoison_filter_enable)
 		goto inject;
 
-	shake_page(hpage);
+	shake_folio(folio);
 	/*
 	 * This implies unable to support non-LRU pages except free page.
 	 */
-	if (!PageLRU(hpage) && !PageHuge(p) && !is_free_buddy_page(p))
+	if (!folio_test_lru(folio) && !folio_test_hugetlb(folio) &&
+	    !is_free_buddy_page(p))
 		return 0;
 
 	/*
@@ -42,7 +43,7 @@ static int hwpoison_inject(void *data, u64 val)
 	 * the targeted owner (or on a free page).
 	 * memory_failure() will redo the check reliably inside page lock.
 	 */
-	err = hwpoison_filter(hpage);
+	err = hwpoison_filter(&folio->page);
 	if (err)
 		return 0;
 
@@ -1037,6 +1037,7 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
 /*
  * mm/memory-failure.c
  */
+void shake_folio(struct folio *folio);
 extern int hwpoison_filter(struct page *p);
 
 extern u32 hwpoison_filter_dev_major;
@@ -369,20 +369,25 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
  * Unknown page type encountered. Try to check whether it can turn PageLRU by
  * lru_add_drain_all.
  */
-void shake_page(struct page *p)
+void shake_folio(struct folio *folio)
 {
-	if (PageHuge(p))
+	if (folio_test_hugetlb(folio))
 		return;
 	/*
 	 * TODO: Could shrink slab caches here if a lightweight range-based
 	 * shrinker will be available.
 	 */
-	if (PageSlab(p))
+	if (folio_test_slab(folio))
 		return;
 
 	lru_add_drain_all();
 }
-EXPORT_SYMBOL_GPL(shake_page);
+EXPORT_SYMBOL_GPL(shake_folio);
+
+static void shake_page(struct page *page)
+{
+	shake_folio(page_folio(page));
+}
 
 static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
 					       unsigned long address)
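Note that the hunk above keeps a file-local static shake_page() wrapper around shake_folio(), presumably so that the remaining page-based callers in this file do not all have to be converted here, while the exported symbol switches from shake_page to shake_folio.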
@@ -1639,7 +1644,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * shake_page() again to ensure that it's flushed.
 	 */
 	if (mlocked)
-		shake_page(hpage);
+		shake_folio(folio);
 
 	/*
 	 * Now that the dirty bit has been propagated to the