linux/arch/alpha/include/asm/fpu.h
commit 0509666660 ("alpha: lazy FPU switching")
Author: Al Viro
Date: 2023-02-24 23:14:22 -05:00
On each context switch we save the FPU registers on the stack of the
old process and restore the FPU registers from the stack of the new one.
That allows us to avoid doing it each time we enter or leave kernel
mode; however, it can be suboptimal in some cases.

	For one thing, we don't need to bother saving anything
for kernel threads.  For another, if a thread gives the CPU up more
than once between entering and leaving the kernel, it does useless
work, saving the same values every time only to discard the saved
copy as soon as it returns from switch_to().

	Alternative solution:

* move the array we save the FPU registers into from switch_stack to thread_info
* have a (thread-synchronous) flag set when we save them
* have another flag set when they should be restored on return to userland
* do *NOT* save/restore them in do_switch_stack()/undo_switch_stack()
* restore on exit to user mode if the restore flag is set; clear both flags
* on context switch, on entry to fork/clone/vfork, before entry into do_signal()
and on entry into a straced syscall, save the registers and set the 'saved' flag
unless it has already been set
* on context switch, set the 'restore' flag as well (see the sketch after
this list)
* have copy_thread() set both flags for the child, so the registers are
restored once the child returns to userland
* use the saved data in setup_sigcontext(); have restore_sigcontext() set both
flags and copy from the sigframe to the save area
* teach ptrace to look for the FPU registers in thread_info instead of
switch_stack
* teach isolated accesses to FPU registers (rdfpcr, wrfpcr, etc.) to check
the 'saved' flag (under preempt_disable()) and work with the save area if it
is set; if the 'saved' flag is found on a write access, set the 'restore'
flag as well
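
	As a rough illustration of the save-side protocol above, here is a
minimal sketch (in C, like the rest of the patch); sketch_save_fpu(),
sketch_switch_fpu() and save_fpu_regs() are names invented for this
example, not helpers from the patch itself:

/* Hypothetical sketch only: save_fpu_regs() stands in for whatever
   actually dumps $f0..$f30 plus the fpcr into ti->fp[]. */
static inline void sketch_save_fpu(struct thread_info *ti)
{
	if (!(ti->status & TS_SAVED_FP)) {
		ti->status |= TS_SAVED_FP;
		save_fpu_regs(ti->fp);		/* assumed helper */
	}
}

/* On a context switch the registers must also be reloaded on the way
   back out to userland, so the switch path sets the restore flag too. */
static inline void sketch_switch_fpu(struct thread_info *ti)
{
	sketch_save_fpu(ti);
	ti->status |= TS_RESTORE_FP;
}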

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Matt Turner <mattst88@gmail.com>

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ALPHA_FPU_H
#define __ASM_ALPHA_FPU_H

#include <asm/special_insns.h>
#include <uapi/asm/fpu.h>

/* The following two functions don't need trapb/excb instructions
   around the mf_fpcr/mt_fpcr instructions because (a) the kernel
   never generates arithmetic faults and (b) call_pal instructions
   are implied trap barriers.  */

static inline unsigned long
rdfpcr(void)
{
	unsigned long tmp, ret;

	preempt_disable();
	if (current_thread_info()->status & TS_SAVED_FP) {
		ret = current_thread_info()->fp[31];
	} else {
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
		__asm__ __volatile__ (
			"ftoit $f0,%0\n\t"
			"mf_fpcr $f0\n\t"
			"ftoit $f0,%1\n\t"
			"itoft %0,$f0"
			: "=r"(tmp), "=r"(ret));
#else
		__asm__ __volatile__ (
			"stt $f0,%0\n\t"
			"mf_fpcr $f0\n\t"
			"stt $f0,%1\n\t"
			"ldt $f0,%0"
			: "=m"(tmp), "=m"(ret));
#endif
	}
	preempt_enable();
	return ret;
}

static inline void
wrfpcr(unsigned long val)
{
	unsigned long tmp;

	preempt_disable();
	if (current_thread_info()->status & TS_SAVED_FP) {
		current_thread_info()->status |= TS_RESTORE_FP;
		current_thread_info()->fp[31] = val;
	} else {
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
		__asm__ __volatile__ (
			"ftoit $f0,%0\n\t"
			"itoft %1,$f0\n\t"
			"mt_fpcr $f0\n\t"
			"itoft %0,$f0"
			: "=&r"(tmp) : "r"(val));
#else
		__asm__ __volatile__ (
			"stt $f0,%0\n\t"
			"ldt $f0,%1\n\t"
			"mt_fpcr $f0\n\t"
			"ldt $f0,%0"
			: "=m"(tmp) : "m"(val));
#endif
	}
	preempt_enable();
}

static inline unsigned long
swcr_update_status(unsigned long swcr, unsigned long fpcr)
{
	/* EV6 implements most of the bits in hardware.  Collect
	   the accrued exception bits from the real fpcr.  */
	if (implver() == IMPLVER_EV6) {
		swcr &= ~IEEE_STATUS_MASK;
		swcr |= (fpcr >> 35) & IEEE_STATUS_MASK;
	}
	return swcr;
}
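
/* Illustrative usage sketch, not part of the original header: roughly how
   a caller can combine a thread's software-completion word with the live
   hardware status via swcr_update_status().  The ieee_state field and
   IEEE_SW_MASK come from the alpha thread_info and <uapi/asm/fpu.h>;
   sketch_get_ieee_status() is a name invented for this example. */
static inline unsigned long
sketch_get_ieee_status(void)
{
	unsigned long swcr = current_thread_info()->ieee_state & IEEE_SW_MASK;

	return swcr_update_status(swcr, rdfpcr());
}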

extern unsigned long alpha_read_fp_reg (unsigned long reg);
extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
extern unsigned long alpha_read_fp_reg_s (unsigned long reg);
extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val);

#endif /* __ASM_ALPHA_FPU_H */