
mm/arm64: override clear_young_dirty_ptes() batch helper

The per-pte get_and_clear/modify/set approach would result in
unfolding/refolding for contpte mappings on arm64.  So we need to override
clear_young_dirty_ptes() for arm64 to avoid it.

Link: https://lkml.kernel.org/r/20240418134435.6092-3-ioworker0@gmail.com
Signed-off-by: Lance Yang <ioworker0@gmail.com>
Suggested-by: Barry Song <21cnbao@gmail.com>
Suggested-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jeff Xie <xiehuan09@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Cc: Zach O'Keefe <zokeefe@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 89e86854fb (parent 1b68112c40)
Lance Yang, 2024-04-18 21:44:33 +08:00; committed by Andrew Morton
2 changed files with 84 additions and 0 deletions
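For context on the unfold/refold cost called out above: the generic clear_young_dirty_ptes() fallback added earlier in this series works one PTE at a time. A simplified sketch follows (illustrative only, not the exact core-mm code; the function name here is invented):

/*
 * Simplified per-pte loop in the style of the generic fallback. On
 * arm64, ptep_get_and_clear() on a contpte-mapped entry unfolds the
 * 16-entry contiguous block, and set_pte_at() may refold it; this
 * repeats for every PTE in the batch, which the override avoids.
 */
static inline void generic_clear_young_dirty_ptes(struct vm_area_struct *vma,
						  unsigned long addr, pte_t *ptep,
						  unsigned int nr, cydp_t flags)
{
	pte_t pte;

	for (; nr-- > 0; ptep++, addr += PAGE_SIZE) {
		pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
		if (flags & CYDP_CLEAR_YOUNG)
			pte = pte_mkold(pte);
		if (flags & CYDP_CLEAR_DIRTY)
			pte = pte_mkclean(pte);
		set_pte_at(vma->vm_mm, addr, ptep, pte);
	}
}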

--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h

@@ -1223,6 +1223,46 @@ static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
 		__ptep_set_wrprotect(mm, address, ptep);
 }
 
+static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
+					   unsigned long addr, pte_t *ptep,
+					   pte_t pte, cydp_t flags)
+{
+	pte_t old_pte;
+
+	do {
+		old_pte = pte;
+
+		if (flags & CYDP_CLEAR_YOUNG)
+			pte = pte_mkold(pte);
+		if (flags & CYDP_CLEAR_DIRTY)
+			pte = pte_mkclean(pte);
+
+		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+					       pte_val(old_pte), pte_val(pte));
+	} while (pte_val(pte) != pte_val(old_pte));
+}
+
+static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma,
+					    unsigned long addr, pte_t *ptep,
+					    unsigned int nr, cydp_t flags)
+{
+	pte_t pte;
+
+	for (;;) {
+		pte = __ptep_get(ptep);
+
+		if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY))
+			__set_pte(ptep, pte_mkclean(pte_mkold(pte)));
+		else
+			__clear_young_dirty_pte(vma, addr, ptep, pte, flags);
+
+		if (--nr == 0)
+			break;
+		ptep++;
+		addr += PAGE_SIZE;
+	}
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
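For reference, the cydp_t flag bits consumed by the helpers above come from the core-mm patch earlier in this series; modulo exact comments, they are defined as:

/* Flags controlling which bits clear_young_dirty_ptes() clears. */
typedef int __bitwise cydp_t;

/* Clear the access (young) bit */
#define CYDP_CLEAR_YOUNG		((__force cydp_t)BIT(0))

/* Clear the dirty bit */
#define CYDP_CLEAR_DIRTY		((__force cydp_t)BIT(1))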
@@ -1379,6 +1419,9 @@ extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
 				unsigned long addr, pte_t *ptep,
 				pte_t entry, int dirty);
+extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
+				unsigned long addr, pte_t *ptep,
+				unsigned int nr, cydp_t flags);
 
 static __always_inline void contpte_try_fold(struct mm_struct *mm,
 					unsigned long addr, pte_t *ptep, pte_t pte)
@@ -1603,6 +1646,17 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 	return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
 }
 
+#define clear_young_dirty_ptes clear_young_dirty_ptes
+static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
+					  unsigned long addr, pte_t *ptep,
+					  unsigned int nr, cydp_t flags)
+{
+	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
+		__clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
+	else
+		contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
+}
+
 #else /* CONFIG_ARM64_CONTPTE */
 
 #define ptep_get				__ptep_get
@@ -1622,6 +1676,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 #define wrprotect_ptes				__wrprotect_ptes
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags			__ptep_set_access_flags
+#define clear_young_dirty_ptes			__clear_young_dirty_ptes
 
 #endif /* CONFIG_ARM64_CONTPTE */
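To show how the override is exercised, here is a hypothetical caller sketch (the function name is made up; the real batching caller lands in mm/madvise.c later in this series):

/*
 * Hypothetical example: core-mm batches nr contiguous PTEs mapping one
 * large folio and clears both bits in a single call. With the arm64
 * override, a fully contpte-mapped batch is serviced without unfolding.
 */
static void example_lazyfree_batch(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *pte,
				   unsigned int nr)
{
	clear_young_dirty_ptes(vma, addr, pte, nr,
			       CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY);
}

The nr == 1, non-contpte fast path in the dispatch helper above keeps the common small-page case as cheap as before.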

--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c

@@ -361,6 +361,35 @@ void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes);
 
+void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep,
+				    unsigned int nr, cydp_t flags)
+{
+	/*
+	 * We can safely clear access/dirty without needing to unfold from
+	 * the architecture's perspective, even when contpte is set. If the
+	 * range starts or ends midway through a contpte block, we can just
+	 * expand to include the full contpte block. While this is not
+	 * exactly what the core-mm asked for, it tracks access/dirty per
+	 * folio, not per page. And since we only create a contpte block
+	 * when it is covered by a single folio, we can get away with
+	 * clearing access/dirty for the whole block.
+	 */
+	unsigned long start = addr;
+	unsigned long end = start + nr * PAGE_SIZE;
+
+	if (pte_cont(__ptep_get(ptep + nr - 1)))
+		end = ALIGN(end, CONT_PTE_SIZE);
+
+	if (pte_cont(__ptep_get(ptep))) {
+		start = ALIGN_DOWN(start, CONT_PTE_SIZE);
+		ptep = contpte_align_down(ptep);
+	}
+
+	__clear_young_dirty_ptes(vma, start, ptep, (end - start) >> PAGE_SHIFT, flags);
+}
+EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
 
 int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
 				unsigned long addr, pte_t *ptep,
 				pte_t entry, int dirty)
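A worked example of the range expansion, assuming 4K pages so that CONT_PTE_SIZE = 16 * PAGE_SIZE = 64KiB (the addresses are made up for illustration):

/*
 * addr = 0x40003000 (page 3 of a contpte block), nr = 20:
 *
 *   end   = 0x40003000 + 20 * 0x1000                        = 0x40017000
 *   last PTE is cont  -> end   = ALIGN(end, 0x10000)        = 0x40020000
 *   first PTE is cont -> start = ALIGN_DOWN(start, 0x10000) = 0x40000000
 *
 * __clear_young_dirty_ptes() then covers (end - start) >> PAGE_SHIFT =
 * 32 PTEs, i.e. the two full contpte blocks overlapping the request.
 */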