memory-failure: use a folio in me_huge_page()

This function was already explicitly calling compound_head();
unfortunately the compiler can't know that and elide the redundant calls
to compound_head() buried in page_mapping(), unlock_page(), etc.  Switch
to using a folio, which does let us elide these calls.

Link: https://lkml.kernel.org/r/20231117161447.2461643-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
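
For intuition, here is a minimal userspace sketch of the pattern; the struct layouts and helpers below are simplified stand-ins for the real kernel definitions, not kernel code. A page-based helper such as unlock_page() must re-derive the head page (the folio) internally on every call, whereas converting to a folio once up front lets every subsequent call skip that lookup -- which is the redundancy the compiler cannot prove away across the opaque calls in the original code:

#include <stdio.h>

/* Simplified stand-ins for the kernel's struct folio / struct page. */
struct folio { int locked; };
struct page { struct folio *head; };	/* tail pages point at their head */

/* Stand-in for page_folio()/compound_head(): a lookup on every call. */
static struct folio *page_folio(struct page *p)
{
	return p->head;
}

/* Page-based helper: repeats the head lookup internally each time. */
static void unlock_page(struct page *p)
{
	page_folio(p)->locked = 0;
}

/* Folio-based helper: the caller has already resolved the folio. */
static void folio_unlock(struct folio *f)
{
	f->locked = 0;
}

int main(void)
{
	struct folio f = { .locked = 1 };
	struct page p = { .head = &f };

	/* Old style: every helper re-derives the folio from the page. */
	unlock_page(&p);

	/* New style: derive the folio once, then operate on it directly. */
	f.locked = 1;
	struct folio *folio = page_folio(&p);
	folio_unlock(folio);

	printf("locked = %d\n", f.locked);
	return 0;
}

In the patched function this means one page_folio() call at the top replaces the compound_head() lookups hidden inside page_mapping(), unlock_page() and put_page().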

--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1182,25 +1182,25 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
  */
 static int me_huge_page(struct page_state *ps, struct page *p)
 {
+	struct folio *folio = page_folio(p);
 	int res;
-	struct page *hpage = compound_head(p);
 	struct address_space *mapping;
 	bool extra_pins = false;
 
-	mapping = page_mapping(hpage);
+	mapping = folio_mapping(folio);
 	if (mapping) {
-		res = truncate_error_page(hpage, page_to_pfn(p), mapping);
+		res = truncate_error_page(&folio->page, page_to_pfn(p), mapping);
 		/* The page is kept in page cache. */
 		extra_pins = true;
-		unlock_page(hpage);
+		folio_unlock(folio);
 	} else {
-		unlock_page(hpage);
+		folio_unlock(folio);
 		/*
 		 * migration entry prevents later access on error hugepage,
 		 * so we can free and dissolve it into buddy to save healthy
 		 * subpages.
 		 */
-		put_page(hpage);
+		folio_put(folio);
 		if (__page_handle_poison(p) >= 0) {
 			page_ref_inc(p);
 			res = MF_RECOVERED;