mm/powerpc: replace pXd_is_leaf() with pXd_leaf()
They're the same macros underneath. Drop pXd_is_leaf(), instead always use pXd_leaf(). In the meantime, instead of renames, drop the pXd_is_leaf() fallback definitions directly in arch/powerpc/include/asm/pgtable.h, because similar fallback macros for pXd_leaf() are already defined in include/linux/pgtable.h. Link: https://lkml.kernel.org/r/20240305043750.93762-3-peterx@redhat.com Signed-off-by: Peter Xu <peterx@redhat.com> Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu> Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org> Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com> Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Konovalov <andreyknvl@gmail.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Kirill A. Shutemov <kirill@shutemov.name> Cc: Muchun Song <muchun.song@linux.dev> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com> Cc: Yang Shi <shy828301@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
a2aa530d85
commit
bd18b68822
@ -1439,18 +1439,16 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
|
||||
/*
|
||||
* Like pmd_huge() and pmd_large(), but works regardless of config options
|
||||
*/
|
||||
#define pmd_is_leaf pmd_is_leaf
|
||||
#define pmd_leaf pmd_is_leaf
|
||||
#define pmd_leaf pmd_leaf
|
||||
#define pmd_large pmd_leaf
|
||||
static inline bool pmd_is_leaf(pmd_t pmd)
|
||||
static inline bool pmd_leaf(pmd_t pmd)
|
||||
{
|
||||
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
|
||||
}
|
||||
|
||||
#define pud_is_leaf pud_is_leaf
|
||||
#define pud_leaf pud_is_leaf
|
||||
#define pud_leaf pud_leaf
|
||||
#define pud_large pud_leaf
|
||||
static inline bool pud_is_leaf(pud_t pud)
|
||||
static inline bool pud_leaf(pud_t pud)
|
||||
{
|
||||
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
|
||||
}
|
||||
|
@ -182,30 +182,6 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef pmd_is_leaf
|
||||
#define pmd_is_leaf pmd_is_leaf
|
||||
static inline bool pmd_is_leaf(pmd_t pmd)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef pud_is_leaf
|
||||
#define pud_is_leaf pud_is_leaf
|
||||
static inline bool pud_is_leaf(pud_t pud)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef p4d_is_leaf
|
||||
#define p4d_is_leaf p4d_is_leaf
|
||||
static inline bool p4d_is_leaf(p4d_t p4d)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define pmd_pgtable pmd_pgtable
|
||||
static inline pgtable_t pmd_pgtable(pmd_t pmd)
|
||||
{
|
||||
|
@ -503,7 +503,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
|
||||
for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
|
||||
if (!pmd_present(*p))
|
||||
continue;
|
||||
if (pmd_is_leaf(*p)) {
|
||||
if (pmd_leaf(*p)) {
|
||||
if (full) {
|
||||
pmd_clear(p);
|
||||
} else {
|
||||
@ -532,7 +532,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
|
||||
for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
|
||||
if (!pud_present(*p))
|
||||
continue;
|
||||
if (pud_is_leaf(*p)) {
|
||||
if (pud_leaf(*p)) {
|
||||
pud_clear(p);
|
||||
} else {
|
||||
pmd_t *pmd;
|
||||
@ -635,12 +635,12 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
|
||||
new_pud = pud_alloc_one(kvm->mm, gpa);
|
||||
|
||||
pmd = NULL;
|
||||
if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
|
||||
if (pud && pud_present(*pud) && !pud_leaf(*pud))
|
||||
pmd = pmd_offset(pud, gpa);
|
||||
else if (level <= 1)
|
||||
new_pmd = kvmppc_pmd_alloc();
|
||||
|
||||
if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
|
||||
if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd)))
|
||||
new_ptep = kvmppc_pte_alloc();
|
||||
|
||||
/* Check if we might have been invalidated; let the guest retry if so */
|
||||
@ -658,7 +658,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
|
||||
new_pud = NULL;
|
||||
}
|
||||
pud = pud_offset(p4d, gpa);
|
||||
if (pud_is_leaf(*pud)) {
|
||||
if (pud_leaf(*pud)) {
|
||||
unsigned long hgpa = gpa & PUD_MASK;
|
||||
|
||||
/* Check if we raced and someone else has set the same thing */
|
||||
@ -709,7 +709,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
|
||||
new_pmd = NULL;
|
||||
}
|
||||
pmd = pmd_offset(pud, gpa);
|
||||
if (pmd_is_leaf(*pmd)) {
|
||||
if (pmd_leaf(*pmd)) {
|
||||
unsigned long lgpa = gpa & PMD_MASK;
|
||||
|
||||
/* Check if we raced and someone else has set the same thing */
|
||||
|
@ -204,14 +204,14 @@ static void radix__change_memory_range(unsigned long start, unsigned long end,
|
||||
pudp = pud_alloc(&init_mm, p4dp, idx);
|
||||
if (!pudp)
|
||||
continue;
|
||||
if (pud_is_leaf(*pudp)) {
|
||||
if (pud_leaf(*pudp)) {
|
||||
ptep = (pte_t *)pudp;
|
||||
goto update_the_pte;
|
||||
}
|
||||
pmdp = pmd_alloc(&init_mm, pudp, idx);
|
||||
if (!pmdp)
|
||||
continue;
|
||||
if (pmd_is_leaf(*pmdp)) {
|
||||
if (pmd_leaf(*pmdp)) {
|
||||
ptep = pmdp_ptep(pmdp);
|
||||
goto update_the_pte;
|
||||
}
|
||||
@ -767,7 +767,7 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
|
||||
if (!pmd_present(*pmd))
|
||||
continue;
|
||||
|
||||
if (pmd_is_leaf(*pmd)) {
|
||||
if (pmd_leaf(*pmd)) {
|
||||
if (IS_ALIGNED(addr, PMD_SIZE) &&
|
||||
IS_ALIGNED(next, PMD_SIZE)) {
|
||||
if (!direct)
|
||||
@ -807,7 +807,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
|
||||
if (!pud_present(*pud))
|
||||
continue;
|
||||
|
||||
if (pud_is_leaf(*pud)) {
|
||||
if (pud_leaf(*pud)) {
|
||||
if (!IS_ALIGNED(addr, PUD_SIZE) ||
|
||||
!IS_ALIGNED(next, PUD_SIZE)) {
|
||||
WARN_ONCE(1, "%s: unaligned range\n", __func__);
|
||||
@ -845,7 +845,7 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
|
||||
if (!p4d_present(*p4d))
|
||||
continue;
|
||||
|
||||
if (p4d_is_leaf(*p4d)) {
|
||||
if (p4d_leaf(*p4d)) {
|
||||
if (!IS_ALIGNED(addr, P4D_SIZE) ||
|
||||
!IS_ALIGNED(next, P4D_SIZE)) {
|
||||
WARN_ONCE(1, "%s: unaligned range\n", __func__);
|
||||
@ -1554,7 +1554,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
|
||||
|
||||
int pud_clear_huge(pud_t *pud)
|
||||
{
|
||||
if (pud_is_leaf(*pud)) {
|
||||
if (pud_leaf(*pud)) {
|
||||
pud_clear(pud);
|
||||
return 1;
|
||||
}
|
||||
@ -1601,7 +1601,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
|
||||
|
||||
int pmd_clear_huge(pmd_t *pmd)
|
||||
{
|
||||
if (pmd_is_leaf(*pmd)) {
|
||||
if (pmd_leaf(*pmd)) {
|
||||
pmd_clear(pmd);
|
||||
return 1;
|
||||
}
|
||||
|
@ -410,7 +410,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
|
||||
if (p4d_none(p4d))
|
||||
return NULL;
|
||||
|
||||
if (p4d_is_leaf(p4d)) {
|
||||
if (p4d_leaf(p4d)) {
|
||||
ret_pte = (pte_t *)p4dp;
|
||||
goto out;
|
||||
}
|
||||
@ -432,7 +432,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
|
||||
if (pud_none(pud))
|
||||
return NULL;
|
||||
|
||||
if (pud_is_leaf(pud)) {
|
||||
if (pud_leaf(pud)) {
|
||||
ret_pte = (pte_t *)pudp;
|
||||
goto out;
|
||||
}
|
||||
@ -471,7 +471,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (pmd_is_leaf(pmd)) {
|
||||
if (pmd_leaf(pmd)) {
|
||||
ret_pte = (pte_t *)pmdp;
|
||||
goto out;
|
||||
}
|
||||
|
@ -100,7 +100,7 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
|
||||
/* 4 level page table */
|
||||
struct page *p4d_page(p4d_t p4d)
|
||||
{
|
||||
if (p4d_is_leaf(p4d)) {
|
||||
if (p4d_leaf(p4d)) {
|
||||
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
|
||||
VM_WARN_ON(!p4d_huge(p4d));
|
||||
return pte_page(p4d_pte(p4d));
|
||||
@ -111,7 +111,7 @@ struct page *p4d_page(p4d_t p4d)
|
||||
|
||||
struct page *pud_page(pud_t pud)
|
||||
{
|
||||
if (pud_is_leaf(pud)) {
|
||||
if (pud_leaf(pud)) {
|
||||
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
|
||||
VM_WARN_ON(!pud_huge(pud));
|
||||
return pte_page(pud_pte(pud));
|
||||
@ -125,7 +125,7 @@ struct page *pud_page(pud_t pud)
|
||||
*/
|
||||
struct page *pmd_page(pmd_t pmd)
|
||||
{
|
||||
if (pmd_is_leaf(pmd)) {
|
||||
if (pmd_leaf(pmd)) {
|
||||
/*
|
||||
* vmalloc_to_page may be called on any vmap address (not only
|
||||
* vmalloc), and it uses pmd_page() etc., when huge vmap is
|
||||
|
@ -3342,7 +3342,7 @@ static void show_pte(unsigned long addr)
|
||||
return;
|
||||
}
|
||||
|
||||
if (p4d_is_leaf(*p4dp)) {
|
||||
if (p4d_leaf(*p4dp)) {
|
||||
format_pte(p4dp, p4d_val(*p4dp));
|
||||
return;
|
||||
}
|
||||
@ -3356,7 +3356,7 @@ static void show_pte(unsigned long addr)
|
||||
return;
|
||||
}
|
||||
|
||||
if (pud_is_leaf(*pudp)) {
|
||||
if (pud_leaf(*pudp)) {
|
||||
format_pte(pudp, pud_val(*pudp));
|
||||
return;
|
||||
}
|
||||
@ -3370,7 +3370,7 @@ static void show_pte(unsigned long addr)
|
||||
return;
|
||||
}
|
||||
|
||||
if (pmd_is_leaf(*pmdp)) {
|
||||
if (pmd_leaf(*pmdp)) {
|
||||
format_pte(pmdp, pmd_val(*pmdp));
|
||||
return;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user