82982fdd51
With CONFIG_IRQ_STACKS, we switch to a separate per-CPU IRQ stack before calling handle_riscv_irq or __do_softirq. We currently have duplicate inline assembly snippets for stack switching in both code paths. Now that we can access per-CPU variables in assembly, implement call_on_irq_stack in assembly, and use that instead of redundant inline assembly.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/20230927224757.1154247-10-samitolvanen@google.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
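The commit message describes replacing two copies of open-coded stack-switching assembly with a single assembly routine, call_on_irq_stack(). As a rough, non-authoritative sketch of the C call-site shape this enables: only handle_riscv_irq and __do_softirq are named in the commit; do_irq(), on_thread_stack(), and the irqentry_enter()/irqentry_exit() framing are assumptions for illustration.

/*
 * Illustrative IRQ entry path: switch to the per-CPU IRQ stack via
 * call_on_irq_stack() instead of duplicated inline assembly.
 * do_irq() and on_thread_stack() are assumed names for this sketch.
 */
#include <linux/entry-common.h>
#include <linux/kconfig.h>
#include <asm/irq_stack.h>

/* Existing interrupt handler named in the commit message. */
void handle_riscv_irq(struct pt_regs *regs);

asmlinkage void do_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
		/* On the task stack: hop to the IRQ stack, run the handler, hop back. */
		call_on_irq_stack(regs, handle_riscv_irq);
	else
		/* Already off the task stack, or IRQ stacks disabled: call directly. */
		handle_riscv_irq(regs);

	irqentry_exit(regs, state);
}

With the stack switch centralized in one assembly routine, both the hard-IRQ and softirq paths can share it rather than carrying their own inline-assembly copies.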
34 lines
843 B
C
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_RISCV_IRQ_STACK_H
#define _ASM_RISCV_IRQ_STACK_H

#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <asm/thread_info.h>

DECLARE_PER_CPU(ulong *, irq_stack_ptr);

asmlinkage void call_on_irq_stack(struct pt_regs *regs,
				  void (*func)(struct pt_regs *));

#ifdef CONFIG_VMAP_STACK
/*
 * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
 * stacks need to have the same alignment.
 */
static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
{
	void *p;

	p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
			   __builtin_return_address(0));
	return kasan_reset_tag(p);
}
#endif /* CONFIG_VMAP_STACK */

#endif /* _ASM_RISCV_IRQ_STACK_H */
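For context on how the declarations in this header fit together, here is a minimal, hypothetical allocation-side sketch: a DEFINE_PER_CPU backing for irq_stack_ptr, filled in once per CPU with a VMAP'd stack from arch_alloc_vmap_stack(). IRQ_STACK_SIZE and init_irq_stacks() are assumed names, and the sketch assumes CONFIG_VMAP_STACK=y; it is not taken from this commit.

/*
 * Hypothetical per-CPU IRQ stack setup using the declarations above.
 * Assumes CONFIG_VMAP_STACK and an IRQ_STACK_SIZE constant.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <asm/irq_stack.h>

DEFINE_PER_CPU(ulong *, irq_stack_ptr);

static void init_irq_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		/* One THREAD_ALIGN'd, VMAP'd stack per CPU, allocated NUMA-locally. */
		per_cpu(irq_stack_ptr, cpu) =
			arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
}

Such an init_irq_stacks() would typically run during early IRQ setup, before the first interrupt is taken, so that the assembly call_on_irq_stack() always finds a valid per-CPU stack pointer to switch to.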