userfaultfd: convert mfill_atomic_pte_copy() to use a folio
Patch series "userfaultfd: convert userfaultfd functions to use folios", v6.

This patch series converts several userfaultfd functions to use folios.

This patch (of 6): Call vma_alloc_folio() directly instead of alloc_page_vma() and convert page_kaddr to kaddr in mfill_atomic_pte_copy(). Removes several calls to compound_head().

Link: https://lkml.kernel.org/r/20230410133932.32288-1-zhangpeng362@huawei.com
Link: https://lkml.kernel.org/r/20230410133932.32288-2-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in: commit 07e6d4095c (parent b4aca54792)
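At a glance, the core of the conversion is swapping the page-based allocation, mapping, and flag helpers for their folio counterparts. A minimal illustrative sketch of the before/after pattern follows, using the names from the hunks below; this is excerpted kernel-context code, not a standalone program, and is only meant to summarize the API substitution:

	/*
	 * Before: allocate a page for the destination VMA and map it
	 * with a local kmap.
	 */
	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
	page_kaddr = kmap_local_page(page);

	/*
	 * After: allocate an order-0 folio and map its first (only) page.
	 */
	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma, dst_addr, false);
	kaddr = kmap_local_folio(folio, 0);

	/*
	 * Flag, cache, and refcount helpers likewise get folio
	 * equivalents, avoiding repeated compound_head() lookups:
	 */
	flush_dcache_folio(folio);	/* was flush_dcache_page(page) */
	__folio_mark_uptodate(folio);	/* was __SetPageUptodate(page) */
	folio_put(folio);		/* was put_page(page) */

The full diff of mfill_atomic_pte_copy() follows.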
@@ -135,17 +135,18 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 			    uffd_flags_t flags,
 			    struct page **pagep)
 {
-	void *page_kaddr;
+	void *kaddr;
 	int ret;
-	struct page *page;
+	struct folio *folio;
 
 	if (!*pagep) {
 		ret = -ENOMEM;
-		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
-		if (!page)
+		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
+					dst_addr, false);
+		if (!folio)
 			goto out;
 
-		page_kaddr = kmap_local_page(page);
+		kaddr = kmap_local_folio(folio, 0);
 		/*
 		 * The read mmap_lock is held here.  Despite the
 		 * mmap_lock being read recursive a deadlock is still
@@ -162,45 +163,44 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 		 * and retry the copy outside the mmap_lock.
 		 */
 		pagefault_disable();
-		ret = copy_from_user(page_kaddr,
-				     (const void __user *) src_addr,
+		ret = copy_from_user(kaddr, (const void __user *) src_addr,
 				     PAGE_SIZE);
 		pagefault_enable();
-		kunmap_local(page_kaddr);
+		kunmap_local(kaddr);
 
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			ret = -ENOENT;
-			*pagep = page;
+			*pagep = &folio->page;
 			/* don't free the page */
 			goto out;
 		}
 
-		flush_dcache_page(page);
+		flush_dcache_folio(folio);
 	} else {
-		page = *pagep;
+		folio = page_folio(*pagep);
 		*pagep = NULL;
 	}
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * preceding stores to the page contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
 	ret = -ENOMEM;
-	if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
 		goto out_release;
 
 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-				       page, true, flags);
+				       &folio->page, true, flags);
 	if (ret)
 		goto out_release;
 out:
 	return ret;
 out_release:
-	put_page(page);
+	folio_put(folio);
 	goto out;
 }