mm/powerpc: add missing pud helpers
Some new helpers will be needed for pud entry updates soon. Introduce these helpers by referencing the pmd ones. Namely: - pudp_invalidate(): this helper invalidates a huge pud before a split happens, so that the invalidated pud entry will make sure no race will happen (either with software, like a concurrent zap, or hardware, like a/d bit lost). - pud_modify(): this helper applies a new pgprot to an existing huge pud mapping. For more information on why we need these two helpers, please refer to the corresponding pmd helpers in the mprotect() code path. Link: https://lkml.kernel.org/r/20240812181225.1360970-4-peterx@redhat.com Signed-off-by: Peter Xu <peterx@redhat.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Dave Jiang <dave.jiang@intel.com> Cc: David Hildenbrand <david@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: "Edgecombe, Rick P" <rick.p.edgecombe@intel.com> Cc: Hugh Dickins <hughd@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Kirill A. Shutemov <kirill@shutemov.name> Cc: Matthew Wilcox <willy@infradead.org> Cc: Oscar Salvador <osalvador@suse.de> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Rik van Riel <riel@surriel.com> Cc: Sean Christopherson <seanjc@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
7f06e3aa2e
commit
4dd7724f02
@ -1124,6 +1124,7 @@ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
|
||||
extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot);
|
||||
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
|
||||
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
|
||||
extern pud_t pud_modify(pud_t pud, pgprot_t newprot);
|
||||
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
|
||||
pmd_t *pmdp, pmd_t pmd);
|
||||
extern void set_pud_at(struct mm_struct *mm, unsigned long addr,
|
||||
@ -1384,6 +1385,8 @@ static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
|
||||
#define __HAVE_ARCH_PMDP_INVALIDATE
|
||||
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
|
||||
pmd_t *pmdp);
|
||||
extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
|
||||
pud_t *pudp);
|
||||
|
||||
#define pmd_move_must_withdraw pmd_move_must_withdraw
|
||||
struct spinlock;
|
||||
|
@ -176,6 +176,17 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
|
||||
return __pmd(old_pmd);
|
||||
}
|
||||
|
||||
/*
 * Invalidate a huge PUD entry before it is split.
 *
 * Marks the entry invalid (clears _PAGE_PRESENT, sets _PAGE_INVALID) via
 * pud_hugepage_update() so that neither software (e.g. a concurrent zap)
 * nor hardware (accessed/dirty bit updates) can race with the split, then
 * flushes the TLB for the full huge-page range.
 *
 * Returns the previous PUD value.
 */
pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pud_t *pudp)
{
	unsigned long old;

	/* Invalidating a non-present entry would indicate a caller bug. */
	VM_WARN_ON_ONCE(!pud_present(*pudp));

	old = pud_hugepage_update(vma->vm_mm, address, pudp,
				  _PAGE_PRESENT, _PAGE_INVALID);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);

	return __pud(old);
}
|
||||
|
||||
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
|
||||
unsigned long addr, pmd_t *pmdp, int full)
|
||||
{
|
||||
@ -259,6 +270,15 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
|
||||
pmdv &= _HPAGE_CHG_MASK;
|
||||
return pmd_set_protbits(__pmd(pmdv), newprot);
|
||||
}
|
||||
|
||||
/*
 * Apply a new protection to an existing huge PUD mapping.
 *
 * Keeps only the bits covered by _HPAGE_CHG_MASK from the old entry and
 * re-derives the protection bits from @newprot via pud_set_protbits().
 *
 * Returns the updated PUD value.
 */
pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	unsigned long val = pud_val(pud) & _HPAGE_CHG_MASK;

	return pud_set_protbits(__pud(val), newprot);
}
|
||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
||||
|
||||
/* For use by kexec, called with MMU off */
|
||||
|
Loading…
Reference in New Issue
Block a user