9524f2278f
When irqs are soft-disabled, MSR[EE] is volatile and can change from 1 to 0 asynchronously (if a PACA_IRQ_MUST_HARD_MASK interrupt hits). So it cannot be used to check hard IRQ enabled status, except to confirm it is disabled. The ppc64_runlatch_on/off functions use the MSR this way to decide whether to re-enable MSR[EE] after disabling it, which leads to MSR[EE] being enabled when it shouldn't be (when a PACA_IRQ_MUST_HARD_MASK interrupt had disabled it between reading the MSR and clearing EE).

This has been tolerated in the kernel previously, and it doesn't seem to cause a problem, but it is unexpected and may trip warnings or cause other problems as we tighten up this state management. Fix this by only re-enabling if PACA_IRQ_HARD_DIS is clear.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220926054305.2671436-5-npiggin@gmail.com
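For context, a minimal sketch of the race being fixed (assuming the old code sampled the MSR with mfmsr() before hard-disabling, as the description above implies; the exact previous source may have differed):

```c
/*
 * Simplified sketch, not the exact previous code.
 *
 * Racy pattern: MSR[EE] can be cleared asynchronously after mfmsr() but
 * before __hard_irq_disable(), so re-enabling based on the stale read can
 * turn EE back on when a PACA_IRQ_MUST_HARD_MASK interrupt wanted it off.
 */
unsigned long msr = mfmsr();
__hard_irq_disable();
__ppc64_runlatch_off();
if (msr & MSR_EE)
	__hard_irq_enable();

/*
 * Fixed pattern (as in the header below): consult the soft-mask state
 * instead. PACA_IRQ_HARD_DIS is set when EE is meant to stay disabled,
 * so only re-enable when it is clear.
 */
__hard_irq_disable();
__ppc64_runlatch_off();
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
	__hard_irq_enable();
```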
45 lines
1.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_RUNLATCH_H
#define _ASM_POWERPC_RUNLATCH_H

#ifdef CONFIG_PPC64

extern void __ppc64_runlatch_on(void);
extern void __ppc64_runlatch_off(void);

/*
 * We manually hard enable-disable, this is called
 * in the idle loop and we don't want to mess up
 * with soft-disable/enable & interrupt replay.
 */
#define ppc64_runlatch_off()					\
	do {							\
		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
		    test_thread_local_flags(_TLF_RUNLATCH)) {	\
			__hard_irq_disable();			\
			__ppc64_runlatch_off();			\
			if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \
				__hard_irq_enable();		\
		}						\
	} while (0)

#define ppc64_runlatch_on()					\
	do {							\
		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
		    !test_thread_local_flags(_TLF_RUNLATCH)) {	\
			__hard_irq_disable();			\
			__ppc64_runlatch_on();			\
			if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \
				__hard_irq_enable();		\
		}						\
	} while (0)
#else
#define ppc64_runlatch_on()
#define ppc64_runlatch_off()
#endif /* CONFIG_PPC64 */

#endif /* _ASM_POWERPC_RUNLATCH_H */