Revert "riscv: mm: support Svnapot in huge vmap"
This reverts commit ce173474cf. We cannot correctly deal with NAPOT mappings in vmalloc/vmap because, if some part of a NAPOT mapping is unmapped, the remaining mapping is not updated accordingly. For example:

    ptr = vmalloc_huge(64 * 1024, GFP_KERNEL);
    vunmap_range((unsigned long)(ptr + PAGE_SIZE),
                 (unsigned long)(ptr + 64 * 1024));

leads to the following kernel page table dump:

    0xffff8f8000ef0000-0xffff8f8000ef1000 0x00000001033c0000 4K PTE N .. .. D A G . . W R V

Meaning the first entry, which was not unmapped, still has the N bit set, which, if accessed first and cached in the TLB, could allow access to the unmapped range.

That's because the logic to break the NAPOT mapping does not exist and likely won't. Indeed, to break a NAPOT mapping, we first have to clear the whole mapping, flush the TLB and then set the new mapping ("break-before-make" equivalent). That works fine in userspace since we can handle any page fault occurring on the remaining mapping, but we can't handle a kernel page fault on such a mapping.

So fix this by reverting the commit that introduced the vmap/vmalloc support.

Fixes: ce173474cf ("riscv: mm: support Svnapot in huge vmap")
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20240227205016.121901-2-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
This commit is contained in:
parent
6613476e22
commit
16ab4646c9
@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_RISCV_ISA_SVNAPOT
|
#endif
|
||||||
#include <linux/pgtable.h>
|
|
||||||
|
|
||||||
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
|
|
||||||
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
|
|
||||||
u64 pfn, unsigned int max_page_shift)
|
|
||||||
{
|
|
||||||
unsigned long map_size = PAGE_SIZE;
|
|
||||||
unsigned long size, order;
|
|
||||||
|
|
||||||
if (!has_svnapot())
|
|
||||||
return map_size;
|
|
||||||
|
|
||||||
for_each_napot_order_rev(order) {
|
|
||||||
if (napot_cont_shift(order) > max_page_shift)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
size = napot_cont_size(order);
|
|
||||||
if (end - addr < size)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (!IS_ALIGNED(addr, size))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (!IS_ALIGNED(PFN_PHYS(pfn), size))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
map_size = size;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
return map_size;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
|
|
||||||
static inline int arch_vmap_pte_supported_shift(unsigned long size)
|
|
||||||
{
|
|
||||||
int shift = PAGE_SHIFT;
|
|
||||||
unsigned long order;
|
|
||||||
|
|
||||||
if (!has_svnapot())
|
|
||||||
return shift;
|
|
||||||
|
|
||||||
WARN_ON_ONCE(size >= PMD_SIZE);
|
|
||||||
|
|
||||||
for_each_napot_order_rev(order) {
|
|
||||||
if (napot_cont_size(order) > size)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (!IS_ALIGNED(size, napot_cont_size(order)))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
shift = napot_cont_shift(order);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
return shift;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* CONFIG_RISCV_ISA_SVNAPOT */
|
|
||||||
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
|
|
||||||
#endif /* _ASM_RISCV_VMALLOC_H */
|
#endif /* _ASM_RISCV_VMALLOC_H */
|
||||||
|
Loading…
Reference in New Issue
Block a user