7a92fc8b4d
The pcpu setup when using the page allocator sets up a new vmalloc mapping very early in the boot process, so early that it cannot use the flush_cache_vmap() function, which may depend on structures not yet initialized (for example on riscv, we currently send an IPI to flush the other CPUs' TLBs).

But on some architectures we must call flush_cache_vmap(): for example, on riscv, some uarchs can cache invalid TLB entries, so we need to flush the newly established mapping to avoid taking an exception.

Fix this by introducing a new function, flush_cache_vmap_early(), which is called right after setting the new page table entry and before accessing this new mapping. This new function implements a local TLB flush on riscv and is a no-op for other architectures (same as today).

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
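To illustrate the split the message describes, here is a minimal sketch of the two sides of the new hook. It is not the actual diff: the riscv helper name local_flush_tlb_kernel_range() is assumed here, and the generic fallback is shown as a conventional #ifndef-guarded stub.

/* Sketch, riscv side: a purely CPU-local flush, safe before SMP is up. */
#define flush_cache_vmap_early(start, end)	local_flush_tlb_kernel_range(start, end)

/* Sketch, generic fallback: architectures that do not need it keep a no-op. */
#ifndef flush_cache_vmap_early
#define flush_cache_vmap_early(start, end)	do { } while (0)
#endif

The nios2 <asm/cacheflush.h> below is one of the per-architecture headers touched by the change; it defines the new hook as an explicit no-op next to the existing flush_cache_vmap()/flush_cache_vunmap() macros.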
/*
 * Copyright (C) 2003 Microtronix Datacom Ltd.
 * Copyright (C) 2000-2002 Greg Ungerer <gerg@snapgear.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_NIOS2_CACHEFLUSH_H
#define _ASM_NIOS2_CACHEFLUSH_H

#include <linux/mm_types.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

struct mm_struct;

extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
	unsigned long pfn);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio

extern void flush_icache_range(unsigned long start, unsigned long end);
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr);
#define flush_icache_pages flush_icache_pages

#define flush_cache_vmap(start, end)		flush_dcache_range(start, end)
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		flush_dcache_range(start, end)

extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
				unsigned long user_vaddr,
				void *dst, void *src, int len);
extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
				unsigned long user_vaddr,
				void *dst, void *src, int len);

extern void flush_dcache_range(unsigned long start, unsigned long end);
extern void invalidate_dcache_range(unsigned long start, unsigned long end);

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
	xa_lock_irqsave(&mapping->i_pages, flags)
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
	xa_unlock_irqrestore(&mapping->i_pages, flags)

#endif /* _ASM_NIOS2_CACHEFLUSH_H */
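Finally, a hedged sketch of the caller-side ordering the commit message requires: install the page table entry, do the local flush, and only then touch the mapping. The wrapper name pcpu_map_first_chunk_page() is invented for illustration and is not the real percpu code; set_pte_at(), mk_pte(), PAGE_KERNEL and init_mm are standard kernel interfaces.

/* Sketch only: the required ordering, with an invented wrapper name. */
static void __init pcpu_map_first_chunk_page(unsigned long addr,
					     struct page *page, pte_t *pte)
{
	/* 1) Install the new kernel mapping for the per-cpu area. */
	set_pte_at(&init_mm, addr, pte, mk_pte(page, PAGE_KERNEL));

	/*
	 * 2) Flush on the local CPU only.  flush_cache_vmap() cannot be
	 *    used this early because it may IPI other CPUs (riscv);
	 *    flush_cache_vmap_early() is the local, always-safe variant.
	 */
	flush_cache_vmap_early(addr, addr + PAGE_SIZE);

	/* 3) Only now may the new mapping be dereferenced without faulting. */
}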