linux/arch/x86/kernel/paravirt-spinlocks.c
Juergen Gross 4e6292114c x86/paravirt: Add new features for paravirt patching
In order to be able to switch paravirt patching from special-cased custom
code sequences to ALTERNATIVE handling, some new X86_FEATURE_* flags are
needed. This makes it possible to keep the standard indirect pv call as the
default code and to patch in the non-Xen custom code sequence via
ALTERNATIVE patching later.
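
Illustrative sketch only, not part of this commit: the new feature bits are
meant to be consumed at the pv call sites by a later patch in the series,
roughly along the following lines. The PVOP_ALT_*CALLEE1 and ALT_NOT helpers
are assumed here for illustration; the point is that the indirect pv_ops call
stays as the default text, and the short native sequence is patched in by
alternatives whenever the feature bit is not set.

/* Sketch: pv call sites keyed on the new (artificial) feature bits. */
static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
			  "movb $0, (%%" _ASM_ARG1 ");",
			  ALT_NOT(X86_FEATURE_PVUNLOCK));
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
}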

Make sure paravirt patching is performed before alternatives patching.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210311142319.4723-9-jgross@suse.com
2021-03-11 19:51:49 +01:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

bool pv_is_native_spin_unlock(void)
{
	return pv_ops.lock.queued_spin_unlock.func ==
	       __raw_callee_save___native_queued_spin_unlock;
}

__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_ops.lock.vcpu_is_preempted.func ==
	       __raw_callee_save___native_vcpu_is_preempted;
}

void __init paravirt_set_cap(void)
{
	if (!pv_is_native_spin_unlock())
		setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);

	if (!pv_is_native_vcpu_is_preempted())
		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
}
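
paravirt_set_cap() above only forces the artificial feature bits; the commit
message additionally requires that paravirt patching runs before alternatives
patching. A rough sketch of that ordering follows; the exact call site is
assumed here for illustration and is not part of this file.

/* Sketch of the required boot-time patching order (call site assumed). */
void __init alternative_instructions(void)
{
	/* 1. Force X86_FEATURE_PVUNLOCK/X86_FEATURE_VCPUPREEMPT if non-native. */
	paravirt_set_cap();

	/* 2. Paravirt patching: replace the indirect pv_ops calls. */
	apply_paravirt(__parainstructions, __parainstructions_end);

	/* 3. Alternatives patching: apply custom sequences keyed on the bits above. */
	apply_alternatives(__alt_instructions, __alt_instructions_end);
}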