linux/arch/sh/kernel/cpu/fpu.c
Eric W. Biederman ce0ee4e6ac signal/sh: Use force_sig(SIGKILL) instead of do_group_exit(SIGKILL)
Today the sh code allocates memory the first time a process uses
the fpu.  If that memory allocation fails, kill the affected task
with force_sig(SIGKILL) rather than do_group_exit(SIGKILL).

Calling do_group_exit from an exception handler can lead to
deadlocks, as do_group_exit is not designed to be called from
interrupt context.  Instead use force_sig(SIGKILL) to kill the
userspace process.  Sending signals in general, and force_sig in
particular, has been tested from interrupt context, so there should
be no problems.

Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: linux-sh@vger.kernel.org
Fixes: 0ea820cf9b ("sh: Move over to dynamically allocated FPU context.")
Link: https://lkml.kernel.org/r/20211020174406.17889-6-ebiederm@xmission.com
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
2021-10-25 15:56:29 -05:00
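
The change described above reduces to a single line in the allocation-failure
path of fpu_state_restore() in the file below (a sketch reconstructed from the
commit message, not the commit's actual diff output):

	-		do_group_exit(SIGKILL);	/* pre-patch: not safe from an exception handler */
	+		force_sig(SIGKILL);	/* post-patch: safe to send from interrupt context */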


// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#include <asm/ptrace.h>

int init_fpu(struct task_struct *tsk)
{
	if (tsk_used_math(tsk)) {
		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
			unlazy_fpu(tsk, task_pt_regs(tsk));
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	if (!tsk->thread.xstate) {
		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!tsk->thread.xstate)
			return -ENOMEM;
	}

	if (boot_cpu_data.flags & CPU_HAS_FPU) {
		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	} else {
		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	}

	set_stopped_child_used_math(tsk);

	return 0;
}
#ifdef CONFIG_SH_FPU
void __fpu_state_restore(void)
{
	struct task_struct *tsk = current;

	restore_fpu(tsk);
	task_thread_info(tsk)->status |= TS_USEDFPU;
	tsk->thread.fpu_counter++;
}

void fpu_state_restore(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (unlikely(!user_mode(regs))) {
		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
		BUG();
		return;
	}

	if (!tsk_used_math(tsk)) {
		int ret;
		/*
		 * does a slab alloc which can sleep
		 */
		local_irq_enable();
		ret = init_fpu(tsk);
		local_irq_disable();
		if (ret) {
			/*
			 * ran out of memory!
			 */
			force_sig(SIGKILL);
			return;
		}
	}

	grab_fpu(regs);
	__fpu_state_restore();
}

BUILD_TRAP_HANDLER(fpu_state_restore)
{
	TRAP_HANDLER_DECL;
	fpu_state_restore(regs);
}
#endif /* CONFIG_SH_FPU */
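
For context, the path above is taken the first time a process executes an FPU
instruction: the FPU-disable trap enters BUILD_TRAP_HANDLER(fpu_state_restore),
which allocates tsk->thread.xstate via init_fpu() and then restores the FPU
state.  A minimal user-space program such as the following (a hypothetical
illustration, not part of the kernel tree) would exercise that path on an
FPU-equipped SH CPU:

/* first_fpu.c - hypothetical example of a process's first FPU use */
#include <stdio.h>

int main(void)
{
	volatile double a = 3.0;
	volatile double b = 0.5;

	/*
	 * The first floating-point instruction this process executes
	 * (here, or possibly earlier during libc startup) raises the
	 * FPU disable exception; the kernel then runs
	 * fpu_state_restore(), which allocates thread.xstate on first
	 * use via init_fpu() before restoring the FPU registers.
	 */
	printf("%f\n", a * b);
	return 0;
}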