/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 */
|
|
|
|
|
|
|
|
#ifndef _ASM_RISCV_ASM_H
#define _ASM_RISCV_ASM_H

/*
 * __ASM_STR(x) - emit a token usable from both assembly and C sources.
 *
 * When included from assembly the argument is used verbatim; when
 * included from C it is stringified so the macros below can be pasted
 * directly into inline-asm strings.
 */
#ifdef __ASSEMBLY__
#define __ASM_STR(x) x
#else
#define __ASM_STR(x) #x
#endif
|
|
|
|
|
|
|
|
/*
 * __REG_SEL(a, b) - select the 64-bit form (a) or the 32-bit form (b)
 * according to the register width (XLEN) of the target ISA.
 */
#if __riscv_xlen == 64
#define __REG_SEL(a, b) __ASM_STR(a)
#elif __riscv_xlen == 32
#define __REG_SEL(a, b) __ASM_STR(b)
#else
#error "Unexpected __riscv_xlen"
#endif
|
|
|
|
|
|
|
|
/* Register-width (XLEN-sized) load, store, and store-conditional mnemonics. */
#define REG_L __REG_SEL(ld, lw)
#define REG_S __REG_SEL(sd, sw)
#define REG_SC __REG_SEL(sc.d, sc.w)
|
riscv: fix race when vmap stack overflow
Currently, when detecting vmap stack overflow, riscv firstly switches
to the so called shadow stack, then use this shadow stack to call the
get_overflow_stack() to get the overflow stack. However, there's
a race here if two or more harts use the same shadow stack at the same
time.
To solve this race, we introduce spin_shadow_stack atomic var, which
will be swap between its own address and 0 in atomic way, when the
var is set, it means the shadow_stack is being used; when the var
is cleared, it means the shadow_stack isn't being used.
Fixes: 31da94c25aea ("riscv: add VMAP_STACK overflow detection")
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Suggested-by: Guo Ren <guoren@kernel.org>
Reviewed-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/20221030124517.2370-1-jszhang@kernel.org
[Palmer: Add AQ to the swap, and also some comments.]
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2022-10-30 05:45:17 -07:00
|
|
|
/*
 * Register-width atomic swap with acquire ordering (.aq suffix).
 * Per the surrounding history, used to claim the vmap-stack-overflow
 * shadow stack without racing other harts.
 */
#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq)
riscv: Introduce alternative mechanism to apply errata solution
Introduce the "alternative" mechanism from ARM64 and x86 to apply the CPU
vendors' errata solution at runtime. The main purpose of this patch is
to provide a framework. Therefore, the implementation is quite basic for
now so that some scenarios cannot use this scheme, such as patching
code to a module, relocating the patching code and heterogeneous CPU
topology.
Users could use the macro ALTERNATIVE to apply an errata to the existing
code flow. In the macro ALTERNATIVE, users need to specify the manufacturer
information(vendorid, archid, and impid) for this errata. Therefore, kernel
will know this errata is suitable for which CPU core. During the booting
procedure, kernel will select the errata required by the CPU core and then
patch it. It means that the kernel only applies the errata to the specified
CPU core. In this case, the vendor's errata does not affect each other at
runtime. The above patching procedure only occurs during the booting phase,
so we only take the overhead of the "alternative" mechanism once.
This "alternative" mechanism is enabled by default to ensure that all
required errata will be applied. However, users can disable this feature by
the Kconfig "CONFIG_RISCV_ERRATA_ALTERNATIVE".
Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2021-03-22 07:26:03 -07:00
|
|
|
/* Assembler data directive that emits one register-width value. */
#define REG_ASM __REG_SEL(.dword, .word)

/* Size of one register in bytes (SZREG) and its log2 (LGREG). */
#define SZREG __REG_SEL(8, 4)
#define LGREG __REG_SEL(3, 2)
|
|
|
|
/*
 * Pointer-sized data helpers: RISCV_PTR is the directive that emits a
 * pointer-sized value, RISCV_SZPTR is sizeof(void *) in bytes and
 * RISCV_LGPTR its log2.  Assembly gets bare tokens; C gets string
 * literals for use inside inline asm.
 */
#if __SIZEOF_POINTER__ == 8
#ifdef __ASSEMBLY__
#define RISCV_PTR .dword
#define RISCV_SZPTR 8
#define RISCV_LGPTR 3
#else
#define RISCV_PTR ".dword"
#define RISCV_SZPTR "8"
#define RISCV_LGPTR "3"
#endif
#elif __SIZEOF_POINTER__ == 4
#ifdef __ASSEMBLY__
#define RISCV_PTR .word
#define RISCV_SZPTR 4
#define RISCV_LGPTR 2
#else
#define RISCV_PTR ".word"
#define RISCV_SZPTR "4"
#define RISCV_LGPTR "2"
#endif
#else
#error "Unexpected __SIZEOF_POINTER__"
#endif
|
|
|
|
|
|
|
|
/* int-sized data directive (.word), size in bytes, and log2 of the size. */
#if (__SIZEOF_INT__ == 4)
#define RISCV_INT __ASM_STR(.word)
#define RISCV_SZINT __ASM_STR(4)
#define RISCV_LGINT __ASM_STR(2)
#else
#error "Unexpected __SIZEOF_INT__"
#endif
|
|
|
|
|
|
|
|
/* short-sized data directive (.half), size in bytes, and log2 of the size. */
#if (__SIZEOF_SHORT__ == 2)
#define RISCV_SHORT __ASM_STR(.half)
#define RISCV_SZSHORT __ASM_STR(2)
#define RISCV_LGSHORT __ASM_STR(1)
#else
#error "Unexpected __SIZEOF_SHORT__"
#endif
|
|
|
|
|
2022-06-07 07:30:58 -07:00
|
|
|
#ifdef __ASSEMBLY__
/* asm-offsets.h provides struct offsets (PT_*, TASK_TI_*) used below. */
#include <asm/asm-offsets.h>

/* Common assembly source macros */
|
|
|
|
|
|
|
|
/*
 * NOP sequence
 *
 * nops \num - emit \num consecutive nop instructions.
 */
.macro nops, num
.rept \num
nop
.endr
.endm
|
|
|
|
|
2023-09-27 15:47:59 -07:00
|
|
|
#ifdef CONFIG_SMP
/* log2(sizeof(long)): scales the CPU number into an index into
 * the pointer-sized __per_cpu_offset[] table. */
#ifdef CONFIG_32BIT
#define PER_CPU_OFFSET_SHIFT 2
#else
#define PER_CPU_OFFSET_SHIFT 3
#endif

/*
 * asm_per_cpu dst sym tmp - leave the current CPU's address of the
 * per-cpu symbol \sym in \dst.  Clobbers \tmp.
 */
.macro asm_per_cpu dst sym tmp
/* \tmp = this hart's CPU number, read from thread_info via tp */
REG_L \tmp, TASK_TI_CPU_NUM(tp)
/* index into __per_cpu_offset[] and fetch this CPU's offset */
slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
la \dst, __per_cpu_offset
add \dst, \dst, \tmp
REG_L \tmp, 0(\dst)
/* \dst = &\sym + per-cpu offset */
la \dst, \sym
add \dst, \dst, \tmp
.endm
#else /* CONFIG_SMP */
/* UP build: the per-cpu symbol itself is the only instance. */
.macro asm_per_cpu dst sym tmp
la \dst, \sym
.endm
#endif /* CONFIG_SMP */
|
|
|
|
|
2023-09-27 15:48:00 -07:00
|
|
|
/*
 * load_per_cpu dst ptr tmp - load the current CPU's value of the
 * per-cpu variable \ptr into \dst.  Clobbers \tmp.
 */
.macro load_per_cpu dst ptr tmp
asm_per_cpu \dst \ptr \tmp
REG_L \dst, 0(\dst)
.endm
|
|
|
|
|
2023-09-27 15:48:02 -07:00
|
|
|
#ifdef CONFIG_SHADOW_CALL_STACK
/* gp is used as the shadow call stack pointer instead */
.macro load_global_pointer
.endm
#else
/* load __global_pointer to gp */
.macro load_global_pointer
.option push
/*
 * Relaxation must be off here: gp is exactly what is being
 * initialized, so a gp-relative relaxation of this la would be wrong.
 */
.option norelax
la gp, __global_pointer$
.option pop
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
|
2023-09-27 15:48:01 -07:00
|
|
|
|
2023-02-21 20:30:21 -07:00
|
|
|
/* save all GPs except x1 ~ x5 */
/*
 * save_from_x6_to_x31 - store x6-x31 into the struct pt_regs at sp.
 * x1-x5 (ra, sp, gp, tp, t0 in the standard ABI) are saved separately
 * by the callers.
 */
.macro save_from_x6_to_x31
REG_S x6, PT_T1(sp)
REG_S x7, PT_T2(sp)
REG_S x8, PT_S0(sp)
REG_S x9, PT_S1(sp)
REG_S x10, PT_A0(sp)
REG_S x11, PT_A1(sp)
REG_S x12, PT_A2(sp)
REG_S x13, PT_A3(sp)
REG_S x14, PT_A4(sp)
REG_S x15, PT_A5(sp)
REG_S x16, PT_A6(sp)
REG_S x17, PT_A7(sp)
REG_S x18, PT_S2(sp)
REG_S x19, PT_S3(sp)
REG_S x20, PT_S4(sp)
REG_S x21, PT_S5(sp)
REG_S x22, PT_S6(sp)
REG_S x23, PT_S7(sp)
REG_S x24, PT_S8(sp)
REG_S x25, PT_S9(sp)
REG_S x26, PT_S10(sp)
REG_S x27, PT_S11(sp)
REG_S x28, PT_T3(sp)
REG_S x29, PT_T4(sp)
REG_S x30, PT_T5(sp)
REG_S x31, PT_T6(sp)
.endm
|
|
|
|
|
|
|
|
/* restore all GPs except x1 ~ x5 */
/*
 * restore_from_x6_to_x31 - reload x6-x31 from the struct pt_regs at sp;
 * the exact inverse of save_from_x6_to_x31.
 */
.macro restore_from_x6_to_x31
REG_L x6, PT_T1(sp)
REG_L x7, PT_T2(sp)
REG_L x8, PT_S0(sp)
REG_L x9, PT_S1(sp)
REG_L x10, PT_A0(sp)
REG_L x11, PT_A1(sp)
REG_L x12, PT_A2(sp)
REG_L x13, PT_A3(sp)
REG_L x14, PT_A4(sp)
REG_L x15, PT_A5(sp)
REG_L x16, PT_A6(sp)
REG_L x17, PT_A7(sp)
REG_L x18, PT_S2(sp)
REG_L x19, PT_S3(sp)
REG_L x20, PT_S4(sp)
REG_L x21, PT_S5(sp)
REG_L x22, PT_S6(sp)
REG_L x23, PT_S7(sp)
REG_L x24, PT_S8(sp)
REG_L x25, PT_S9(sp)
REG_L x26, PT_S10(sp)
REG_L x27, PT_S11(sp)
REG_L x28, PT_T3(sp)
REG_L x29, PT_T4(sp)
REG_L x30, PT_T5(sp)
REG_L x31, PT_T6(sp)
.endm
|
|
|
|
|
2023-10-04 06:10:09 -07:00
|
|
|
/* Annotate a function as being unsuitable for kprobes. */
#ifdef CONFIG_KPROBES
/*
 * Record a pointer to "name" in the _kprobe_blacklist section so the
 * kprobes core can reject probes placed on it.
 */
#define ASM_NOKPROBE(name) \
.pushsection "_kprobe_blacklist", "aw"; \
RISCV_PTR name; \
.popsection
#else
/* Without kprobes the annotation expands to nothing. */
#define ASM_NOKPROBE(name)
#endif
|
|
|
|
|
2022-06-07 07:30:58 -07:00
|
|
|
#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_ASM_H */
|