mm/ksm: page_add_anon_rmap() -> folio_add_anon_rmap_pte()
Let's convert replace_page(). While at it, perform some folio conversion. Link: https://lkml.kernel.org/r/20231220224504.646757-19-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Muchun Song <muchun.song@linux.dev> Cc: Muchun Song <songmuchun@bytedance.com> Cc: Peter Xu <peterx@redhat.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Yin Fengwei <fengwei.yin@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
a15dc4785c
commit
977295349e
8
mm/ksm.c
8
mm/ksm.c
@@ -1369,6 +1369,7 @@ out:
|
||||
static int replace_page(struct vm_area_struct *vma, struct page *page,
|
||||
struct page *kpage, pte_t orig_pte)
|
||||
{
|
||||
struct folio *kfolio = page_folio(kpage);
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
struct folio *folio;
|
||||
pmd_t *pmd;
|
||||
@@ -1408,15 +1409,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
|
||||
goto out_mn;
|
||||
}
|
||||
VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
|
||||
VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage);
|
||||
VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
|
||||
kfolio);
|
||||
|
||||
/*
|
||||
* No need to check ksm_use_zero_pages here: we can only have a
|
||||
* zero_page here if ksm_use_zero_pages was enabled already.
|
||||
*/
|
||||
if (!is_zero_pfn(page_to_pfn(kpage))) {
|
||||
get_page(kpage);
|
||||
page_add_anon_rmap(kpage, vma, addr, RMAP_NONE);
|
||||
folio_get(kfolio);
|
||||
folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
|
||||
newpte = mk_pte(kpage, vma->vm_page_prot);
|
||||
} else {
|
||||
/*
|
||||
|
Loading…
Reference in New Issue
Block a user