powerpc/64s/interrupt: handle MSR EE and RI in interrupt entry wrapper

The mtmsrd to enable MSR[RI] can be combined with the mtmsrd to enable
MSR[EE] in interrupt entry code, for those interrupts which enable EE.
This helps performance of important synchronous interrupts (e.g., page
faults).
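
On 64s, both hard-enable helpers come down to a single mtmsrd, along the
lines of the arch/powerpc/include/asm/hw_irq.h definitions (a sketch of
the Book3S-64 case, not the exact source):

  static inline void __hard_irq_enable(void)
  {
          /* one mtmsrd L=1 sets both bits: enabling EE costs no extra insn */
          __mtmsrd(MSR_EE | MSR_RI, 1);
  }

  static inline void __hard_RI_enable(void)
  {
          __mtmsrd(MSR_RI, 1);
  }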

This is similar to what commit dd152f70bd ("powerpc/64s: system call
avoid setting MSR[RI] until we set MSR[EE]") does for system calls.

Do this by enabling EE and RI together at the beginning of the entry
wrapper if PACA_IRQ_HARD_DIS is clear, and only enabling RI if it is
set.
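
Condensed from the interrupt_enter_prepare() hunk below, the new wrapper
logic is roughly:

  if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
          /* synchronous interrupt taken from an EE=1 context */
          __hard_irq_enable();    /* one mtmsrd sets MSR[EE] and MSR[RI] */
  } else {
          /* async, or the interrupted context was hard disabled */
          __hard_RI_enable();     /* MSR[RI] only */
  }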

Asynchronous interrupts set PACA_IRQ_HARD_DIS, but synchronous ones
leave it unchanged, so by default they always get EE=1 unless they have
interrupted a caller that is hard disabled. When the sync interrupt
later calls interrupt_cond_local_irq_enable(), it will not require
another mtmsrd because MSR[EE] was already enabled here.
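
For reference, that helper is roughly (a sketch of the existing
arch/powerpc/include/asm/interrupt.h code, unchanged by this patch):

  static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
  {
          /* only enable if the interrupted context had irqs enabled */
          if (!arch_irq_disabled_regs(regs))
                  local_irq_enable();     /* EE already set: no mtmsrd needed */
  }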

This avoids one mtmsrd L=1 for synchronous interrupts on 64s, which
saves about 20 cycles on POWER9. And for kernel-mode interrupts, both
synchronous and asynchronous, this saves an additional 40 cycles due to
the mtmsrd being moved ahead of mfspr SPRN_AMR, which prevents an SPR
scoreboard stall.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210922145452.352571-3-npiggin@gmail.com
commit ff0b0d6e1a (parent 4423eb5ae3)
Nicholas Piggin, 2021-09-23 00:54:48 +10:00; committed by Michael Ellerman
4 changed files with 42 additions and 38 deletions

arch/powerpc/include/asm/interrupt.h

@@ -149,8 +149,14 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
#endif
#ifdef CONFIG_PPC64
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
trace_hardirqs_off();
bool trace_enable = false;
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
trace_enable = true;
} else {
irq_soft_mask_set(IRQS_ALL_DISABLED);
}
/*
* If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
@@ -164,8 +170,14 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(!(regs->msr & MSR_EE));
__hard_irq_enable();
} else {
__hard_RI_enable();
}
/* Do this when RI=1 because it can cause SLB faults */
if (trace_enable)
trace_hardirqs_off();
if (user_mode(regs)) {
kuap_lock();
CT_WARN_ON(ct_state() != CONTEXT_USER);
@@ -220,13 +232,16 @@ static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct in
/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
interrupt_enter_prepare(regs, state);
#ifdef CONFIG_PPC_BOOK3S_64
/*
* RI=1 is set by interrupt_enter_prepare, so this thread flags access
* has to come afterward (it can cause SLB faults).
*/
if (cpu_has_feature(CPU_FTR_CTRL) &&
!test_thread_local_flags(_TLF_RUNLATCH))
__ppc64_runlatch_on();
#endif
interrupt_enter_prepare(regs, state);
irq_enter();
}
@@ -296,6 +311,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
regs->softe = IRQS_ALL_DISABLED;
}
__hard_RI_enable();
/* Don't do any per-CPU operations until interrupt state is fixed */
if (nmi_disables_ftrace(regs)) {
@@ -393,6 +410,8 @@ interrupt_handler long func(struct pt_regs *regs) \
{ \
long ret; \
\
__hard_RI_enable(); \
\
ret = ____##func (regs); \
\
return ret; \

arch/powerpc/kernel/exceptions-64s.S

@@ -113,7 +113,6 @@ name:
#define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */
#define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */
#define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */
#define ISET_RI .L_ISET_RI_\name\() /* Run common code w/ MSR[RI]=1 */
#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
@@ -157,9 +156,6 @@ do_define_int n
.ifndef IDSISR
IDSISR=0
.endif
.ifndef ISET_RI
ISET_RI=1
.endif
.ifndef IBRANCH_TO_COMMON
IBRANCH_TO_COMMON=1
.endif
@@ -512,11 +508,6 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
stb r10,PACASRR_VALID(r13)
.endif
.if ISET_RI
li r10,MSR_RI
mtmsrd r10,1 /* Set MSR_RI */
.endif
.if ISTACK
.if IKUAP
kuap_save_amr_and_lock r9, r10, cr1, cr0
@@ -900,11 +891,6 @@ INT_DEFINE_BEGIN(system_reset)
IVEC=0x100
IAREA=PACA_EXNMI
IVIRT=0 /* no virt entry point */
/*
* MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
* being used, so a nested NMI exception would corrupt it.
*/
ISET_RI=0
ISTACK=0
IKVM_REAL=1
INT_DEFINE_END(system_reset)
@@ -977,16 +963,14 @@ TRAMP_REAL_BEGIN(system_reset_fwnmi)
EXC_COMMON_BEGIN(system_reset_common)
__GEN_COMMON_ENTRY system_reset
/*
* Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
* to recover, but nested NMI will notice in_nmi and not recover
* because of the use of the NMI stack. in_nmi reentrancy is tested in
* system_reset_exception.
* Increment paca->in_nmi. When the interrupt entry wrapper later
* enables MSR_RI, SLB or MCE will be able to recover, but a nested
* NMI will notice in_nmi and not recover because of the use of the NMI
* stack. in_nmi reentrancy is tested in system_reset_exception.
*/
lhz r10,PACA_IN_NMI(r13)
addi r10,r10,1
sth r10,PACA_IN_NMI(r13)
li r10,MSR_RI
mtmsrd r10,1
mr r10,r1
ld r1,PACA_NMI_EMERG_SP(r13)
@@ -1060,12 +1044,6 @@ INT_DEFINE_BEGIN(machine_check_early)
IAREA=PACA_EXMC
IVIRT=0 /* no virt entry point */
IREALMODE_COMMON=1
/*
* MSR_RI is not enabled, because PACA_EXMC is being used, so a
* nested machine check corrupts it. machine_check_common enables
* MSR_RI.
*/
ISET_RI=0
ISTACK=0
IDAR=1
IDSISR=1
@@ -1076,7 +1054,6 @@ INT_DEFINE_BEGIN(machine_check)
IVEC=0x200
IAREA=PACA_EXMC
IVIRT=0 /* no virt entry point */
ISET_RI=0
IDAR=1
IDSISR=1
IKVM_REAL=1
@@ -1146,9 +1123,6 @@ EXC_COMMON_BEGIN(machine_check_early_common)
BEGIN_FTR_SECTION
bl enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
li r10,MSR_RI
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
std r3,RESULT(r1) /* Save result */
@@ -1236,10 +1210,6 @@ EXC_COMMON_BEGIN(machine_check_common)
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
GEN_COMMON machine_check
/* Enable MSR_RI when finished with PACA_EXMC */
li r10,MSR_RI
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception_async
b interrupt_return_srr

arch/powerpc/kernel/fpu.S

@@ -81,7 +81,12 @@ EXPORT_SYMBOL(store_fp_state)
*/
_GLOBAL(load_up_fpu)
mfmsr r5
#ifdef CONFIG_PPC_BOOK3S_64
/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
ori r5,r5,MSR_FP|MSR_RI
#else
ori r5,r5,MSR_FP
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
oris r5,r5,MSR_VSX@h

arch/powerpc/kernel/vector.S

@@ -47,6 +47,10 @@ EXPORT_SYMBOL(store_vr_state)
*/
_GLOBAL(load_up_altivec)
mfmsr r5 /* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3S_64
/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
ori r5,r5,MSR_RI
#endif
oris r5,r5,MSR_VEC@h
MTMSRD(r5) /* enable use of AltiVec now */
isync
@@ -126,6 +130,12 @@ _GLOBAL(load_up_vsx)
andis. r5,r12,MSR_VEC@h
beql+ load_up_altivec /* skip if already loaded */
#ifdef CONFIG_PPC_BOOK3S_64
/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
li r5,MSR_RI
mtmsrd r5,1
#endif
ld r4,PACACURRENT(r13)
addi r4,r4,THREAD /* Get THREAD */
li r6,1