rmap: replace two calls to compound_order with folio_order
Removes two unnecessary conversions from folio to page.  Should be no
difference in behaviour.

Link: https://lkml.kernel.org/r/20240215205307.674707-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 059ab7be09
parent 902ccb86ed
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2169,7 +2169,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
 			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
 			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
-						compound_order(&folio->page));
+						folio_order(folio));
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
@@ -2261,7 +2261,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			else
 				set_pte_at(mm, address, pvmw.pte, swp_pte);
 			trace_set_migration_pte(address, pte_val(swp_pte),
-						compound_order(&folio->page));
+						folio_order(folio));
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
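For context, a minimal userspace sketch of why the replacement is behaviour-neutral: compound_order() on a folio's head page just recovers the folio and reads its order, so calling folio_order() on the folio that is already in hand skips the page round trip. The struct layout, field names, and bodies below are illustrative assumptions, not the real include/linux/mm.h definitions.

/*
 * Simplified, userspace-only model of the two helpers.  The real kernel
 * definitions differ in detail (page flags, the order encoded in _flags_1,
 * large-folio checks); the point is only that compound_order() re-derives
 * the folio from the page, while folio_order() uses the folio directly.
 */
#include <stdio.h>

struct page {
	unsigned long flags;
};

/* A folio is a head page plus extra metadata; modelled minimally here. */
struct folio {
	struct page page;
	unsigned int order;	/* stand-in for the encoded allocation order */
};

static unsigned int folio_order(const struct folio *folio)
{
	return folio->order;
}

static unsigned int compound_order(const struct page *page)
{
	/* Cast the (head) page back to its containing folio, as the kernel does. */
	const struct folio *folio = (const struct folio *)page;

	return folio_order(folio);
}

int main(void)
{
	struct folio folio = { .order = 4 };	/* e.g. a 16-page folio */

	/* Both calls report the same order; the second avoids the round trip. */
	printf("compound_order: %u\n", compound_order(&folio.page));
	printf("folio_order:    %u\n", folio_order(&folio));
	return 0;
}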