LoongArch: KVM: Add PV IPI support on host side
On LoongArch systems, the IPI hardware uses iocsr registers. There is one iocsr register access on IPI sending, and two iocsr accesses on IPI receiving in the IPI interrupt handler. In VM mode, every iocsr access causes the VM to trap into the hypervisor, so a single hardware IPI notification costs three traps. This patch adds PV IPI support for VMs: a hypercall instruction is used on the IPI sender side, and the hypervisor injects an SWI into the destination vcpu. In the SWI interrupt handler, only the CSR.ESTAT register is written to clear the irq. CSR.ESTAT accesses do not trap into the hypervisor, so with PV IPI there is one trap on the sender side and none on the receiver side — one trap per IPI notification in total. This patch also adds IPI multicast support, using a method similar to x86's: one notification can be sent to up to 128 vcpus at a time, greatly reducing the number of traps into the hypervisor. Signed-off-by: Bibo Mao <maobibo@loongson.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
This commit is contained in:
parent
73516e9da5
commit
e33bda7ee5
@ -43,6 +43,7 @@ struct kvm_vcpu_stat {
|
|||||||
u64 idle_exits;
|
u64 idle_exits;
|
||||||
u64 cpucfg_exits;
|
u64 cpucfg_exits;
|
||||||
u64 signal_exits;
|
u64 signal_exits;
|
||||||
|
u64 hypercall_exits;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)
|
#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)
|
||||||
|
@ -2,6 +2,18 @@
|
|||||||
#ifndef _ASM_LOONGARCH_KVM_PARA_H
|
#ifndef _ASM_LOONGARCH_KVM_PARA_H
|
||||||
#define _ASM_LOONGARCH_KVM_PARA_H
|
#define _ASM_LOONGARCH_KVM_PARA_H
|
||||||
|
|
||||||
|
/*
 * Hypercall code field
 *
 * A hypercall code carries the hypervisor id in the upper bits and a
 * hypervisor-specific function code in the low byte.
 */
#define HYPERVISOR_KVM			1
#define HYPERVISOR_VENDOR_SHIFT		8
/* Arguments are parenthesized so expression operands expand safely. */
#define HYPERCALL_ENCODE(vendor, code)	(((vendor) << HYPERVISOR_VENDOR_SHIFT) + (code))

#define KVM_HCALL_CODE_SERVICE		0

#define KVM_HCALL_SERVICE		HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define KVM_HCALL_FUNC_IPI		1
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* LoongArch hypercall return code
|
* LoongArch hypercall return code
|
||||||
*/
|
*/
|
||||||
@ -9,6 +21,125 @@
|
|||||||
#define KVM_HCALL_INVALID_CODE -1UL
|
#define KVM_HCALL_INVALID_CODE -1UL
|
||||||
#define KVM_HCALL_INVALID_PARAMETER -2UL
|
#define KVM_HCALL_INVALID_PARAMETER -2UL
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Hypercall interface for KVM hypervisor
|
||||||
|
*
|
||||||
|
* a0: function identifier
|
||||||
|
* a1-a6: args
|
||||||
|
* Return value will be placed in a0.
|
||||||
|
* Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6.
|
||||||
|
*/
|
||||||
|
/*
 * kvm_hypercall0() - Issue a KVM service hypercall with no arguments.
 * @fid: hypercall function ID, passed to the hypervisor in $a0.
 *
 * Executes the "hvcl" instruction with the KVM service code; the
 * hypervisor writes the result back into $a0, so @ret and @fun are
 * deliberately bound to the same register. The "memory" clobber
 * orders the trap against surrounding memory accesses.
 */
static __always_inline long kvm_hypercall0(u64 fid)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun)
		: "memory"
		);

	return ret;
}
|
||||||
|
|
||||||
|
/*
 * kvm_hypercall1() - Issue a KVM service hypercall with one argument.
 * @fid:  hypercall function ID, passed in and returned through $a0.
 * @arg0: first argument, passed in $a1.
 *
 * Traps to the hypervisor via "hvcl"; the result comes back in $a0.
 */
static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1)
		: "memory"
		);

	return ret;
}
|
||||||
|
|
||||||
|
/*
 * kvm_hypercall2() - Issue a KVM service hypercall with two arguments.
 * @fid:  hypercall function ID, passed in and returned through $a0.
 * @arg0: first argument, passed in $a1.
 * @arg1: second argument, passed in $a2.
 *
 * Traps to the hypervisor via "hvcl"; the result comes back in $a0.
 */
static __always_inline long kvm_hypercall2(u64 fid,
		unsigned long arg0, unsigned long arg1)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2)
		: "memory"
		);

	return ret;
}
|
||||||
|
|
||||||
|
/*
 * kvm_hypercall3() - Issue a KVM service hypercall with three arguments.
 * @fid:  hypercall function ID, passed in and returned through $a0.
 * @arg0: first argument, passed in $a1.
 * @arg1: second argument, passed in $a2.
 * @arg2: third argument, passed in $a3.
 *
 * Traps to the hypervisor via "hvcl"; the result comes back in $a0.
 */
static __always_inline long kvm_hypercall3(u64 fid,
		unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3)
		: "memory"
		);

	return ret;
}
|
||||||
|
|
||||||
|
/*
 * kvm_hypercall4() - Issue a KVM service hypercall with four arguments.
 * @fid:  hypercall function ID, passed in and returned through $a0.
 * @arg0: first argument, passed in $a1.
 * @arg1: second argument, passed in $a2.
 * @arg2: third argument, passed in $a3.
 * @arg3: fourth argument, passed in $a4.
 *
 * Traps to the hypervisor via "hvcl"; the result comes back in $a0.
 */
static __always_inline long kvm_hypercall4(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;
	register unsigned long a4 asm("a4") = arg3;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
		: "memory"
		);

	return ret;
}
|
||||||
|
|
||||||
|
/*
 * kvm_hypercall5() - Issue a KVM service hypercall with five arguments.
 * @fid:  hypercall function ID, passed in and returned through $a0.
 * @arg0: first argument, passed in $a1.
 * @arg1: second argument, passed in $a2.
 * @arg2: third argument, passed in $a3.
 * @arg3: fourth argument, passed in $a4.
 * @arg4: fifth argument, passed in $a5.
 *
 * Traps to the hypervisor via "hvcl"; the result comes back in $a0.
 */
static __always_inline long kvm_hypercall5(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;
	register unsigned long a4 asm("a4") = arg3;
	register unsigned long a5 asm("a5") = arg4;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
		: "memory"
		);

	return ret;
}
|
||||||
|
|
||||||
static inline unsigned int kvm_arch_para_features(void)
|
static inline unsigned int kvm_arch_para_features(void)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -110,4 +110,14 @@ static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num)
|
||||||
|
{
|
||||||
|
return vcpu->arch.gprs[num];
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val)
|
||||||
|
{
|
||||||
|
vcpu->arch.gprs[num] = val;
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
|
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
|
||||||
|
@ -168,6 +168,7 @@
|
|||||||
#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
|
#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
|
||||||
#define KVM_SIGNATURE "KVM\0"
|
#define KVM_SIGNATURE "KVM\0"
|
||||||
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
|
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
|
||||||
|
#define KVM_FEATURE_IPI BIT(1)
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
#ifndef __ASSEMBLY__
|
||||||
|
|
||||||
|
@ -48,6 +48,9 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
|
|||||||
/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
|
/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
|
||||||
vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
|
vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
|
||||||
break;
|
break;
|
||||||
|
case CPUCFG_KVM_FEATURE:
|
||||||
|
vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
vcpu->arch.gprs[rd] = 0;
|
vcpu->arch.gprs[rd] = 0;
|
||||||
break;
|
break;
|
||||||
@ -706,12 +709,74 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
|
|||||||
return RESUME_GUEST;
|
return RESUME_GUEST;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
unsigned int min, cpu, i;
|
||||||
|
unsigned long ipi_bitmap;
|
||||||
|
struct kvm_vcpu *dest;
|
||||||
|
|
||||||
|
min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
|
||||||
|
for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
|
||||||
|
ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
|
||||||
|
if (!ipi_bitmap)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
|
||||||
|
while (cpu < BITS_PER_LONG) {
|
||||||
|
dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
|
||||||
|
cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
|
||||||
|
if (!dest)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
/* Send SWI0 to dest vcpu to emulate IPI interrupt */
|
||||||
|
kvm_queue_irq(dest, INT_SWI0);
|
||||||
|
kvm_vcpu_kick(dest);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Hypercall emulation always return to guest, Caller should check retval.
|
||||||
|
*/
|
||||||
|
static void kvm_handle_service(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
|
||||||
|
long ret;
|
||||||
|
|
||||||
|
switch (func) {
|
||||||
|
case KVM_HCALL_FUNC_IPI:
|
||||||
|
kvm_send_pv_ipi(vcpu);
|
||||||
|
ret = KVM_HCALL_SUCCESS;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
ret = KVM_HCALL_INVALID_CODE;
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
|
||||||
|
}
|
||||||
|
|
||||||
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
|
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
|
larch_inst inst;
|
||||||
|
unsigned int code;
|
||||||
|
|
||||||
|
inst.word = vcpu->arch.badi;
|
||||||
|
code = inst.reg0i15_format.immediate;
|
||||||
update_pc(&vcpu->arch);
|
update_pc(&vcpu->arch);
|
||||||
|
|
||||||
/* Treat it as noop intruction, only set return value */
|
switch (code) {
|
||||||
vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE;
|
case KVM_HCALL_SERVICE:
|
||||||
|
vcpu->stat.hypercall_exits++;
|
||||||
|
kvm_handle_service(vcpu);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
/* Treat it as noop intruction, only set return value */
|
||||||
|
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
return RESUME_GUEST;
|
return RESUME_GUEST;
|
||||||
}
|
}
|
||||||
|
@ -19,6 +19,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
|
|||||||
STATS_DESC_COUNTER(VCPU, idle_exits),
|
STATS_DESC_COUNTER(VCPU, idle_exits),
|
||||||
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
|
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
|
||||||
STATS_DESC_COUNTER(VCPU, signal_exits),
|
STATS_DESC_COUNTER(VCPU, signal_exits),
|
||||||
|
STATS_DESC_COUNTER(VCPU, hypercall_exits)
|
||||||
};
|
};
|
||||||
|
|
||||||
const struct kvm_stats_header kvm_vcpu_stats_header = {
|
const struct kvm_stats_header kvm_vcpu_stats_header = {
|
||||||
|
Loading…
Reference in New Issue
Block a user