
Merge tag 'kvm-riscv-6.12-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 6.12

- Fix sbiret init before forwarding to userspace
- Don't zero-out PMU snapshot area before freeing data
- Allow legacy PMU access from guest
- Fix to allow hpmcounter31 from the guest
Paolo Bonzini 2024-09-14 09:56:08 -04:00
commit 0cdcc99eea
3 changed files with 21 additions and 18 deletions

arch/riscv/include/asm/kvm_vcpu_pmu.h

@@ -10,6 +10,7 @@
 #define __KVM_VCPU_RISCV_PMU_H
 
 #include <linux/perf/riscv_pmu.h>
+#include <asm/kvm_vcpu_insn.h>
 #include <asm/sbi.h>
 
 #ifdef CONFIG_RISCV_PMU_SBI
@@ -64,11 +65,11 @@ struct kvm_pmu {
 
 #if defined(CONFIG_32BIT)
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
+{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
 #else
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
 #endif
 
 int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
@@ -104,8 +105,20 @@
 struct kvm_pmu {
 };
 
+static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
+						 unsigned long *val, unsigned long new_val,
+						 unsigned long wr_mask)
+{
+	if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
+		*val = 0;
+		return KVM_INSN_CONTINUE_NEXT_SEPC;
+	} else {
+		return KVM_INSN_ILLEGAL_TRAP;
+	}
+}
+
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = 0, .count = 0, .func = NULL },
+{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
 
 static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
 static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
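
Note: two of the fixes in the tag land in this header. The count bump from 31 to 32 is what exposes hpmcounter31, since the table covers .count CSRs starting at .base and 31 stopped one short. The new kvm_riscv_vcpu_pmu_read_legacy stub lets guests on hosts built without CONFIG_RISCV_PMU_SBI read the legacy cycle/instret CSRs as zero instead of taking an illegal-instruction trap. A standalone sketch of that policy follows; the CSR numbers are from the RISC-V privileged spec, while the return codes are stand-ins for the kernel's KVM_INSN_* values, so this is illustrative rather than kernel code:

	/*
	 * Sketch of the legacy-counter policy: cycle/instret read as 0 and
	 * execution continues past the instruction; anything else in the
	 * covered range (e.g. CSR_TIME) still traps.
	 */
	#include <stdio.h>

	#define CSR_CYCLE   0xc00
	#define CSR_TIME    0xc01
	#define CSR_INSTRET 0xc02

	enum { INSN_CONTINUE_NEXT_SEPC = 1, INSN_ILLEGAL_TRAP = -1 };

	static int read_legacy(unsigned int csr_num, unsigned long *val)
	{
		if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
			*val = 0;                       /* legacy guests see a zero counter */
			return INSN_CONTINUE_NEXT_SEPC; /* step past the rdcycle/rdinstret */
		}
		return INSN_ILLEGAL_TRAP;               /* CSR_TIME falls through to a trap */
	}

	int main(void)
	{
		unsigned long v = 0;
		int r = read_legacy(CSR_CYCLE, &v);

		printf("cycle   -> %d, v=%lu\n", r, v);
		printf("time    -> %d\n", read_legacy(CSR_TIME, &v));
		printf("instret -> %d\n", read_legacy(CSR_INSTRET, &v));
		return 0;
	}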

arch/riscv/kvm/vcpu_pmu.c

@@ -391,19 +391,9 @@ int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
 static void kvm_pmu_clear_snapshot_area(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
-	int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
 
-	if (kvpmu->sdata) {
-		if (kvpmu->snapshot_addr != INVALID_GPA) {
-			memset(kvpmu->sdata, 0, snapshot_area_size);
-			kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr,
-					     kvpmu->sdata, snapshot_area_size);
-		} else {
-			pr_warn("snapshot address invalid\n");
-		}
-		kfree(kvpmu->sdata);
-		kvpmu->sdata = NULL;
-	}
+	kfree(kvpmu->sdata);
+	kvpmu->sdata = NULL;
 	kvpmu->snapshot_addr = INVALID_GPA;
 }
 
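Note: a hedged reading of the hunk above (the diff itself only shows the simplification): the removed path wrote zeroes back into the guest's snapshot page during teardown, a point at which dereferencing guest memory is unsafe on a dying VM; the fix frees only the host-side buffer and resets the bookkeeping. A standalone sketch of the resulting pattern, with stand-in types and a stand-in INVALID_GPA:

	/* Teardown frees the host buffer and poisons the pointers; it never
	 * touches guest memory on this path. Illustrative only. */
	#include <stdlib.h>

	#define INVALID_GPA (~0ULL)

	struct pmu_state {
		void *sdata;                       /* host copy of the snapshot area */
		unsigned long long snapshot_addr;  /* guest physical address */
	};

	static void clear_snapshot_area(struct pmu_state *pmu)
	{
		free(pmu->sdata);                  /* free(NULL) is a no-op, like kfree */
		pmu->sdata = NULL;
		pmu->snapshot_addr = INVALID_GPA;
	}

	int main(void)
	{
		struct pmu_state pmu = { malloc(4096), 0x80000000ULL };

		clear_snapshot_area(&pmu);
		clear_snapshot_area(&pmu);         /* safe to call twice */
		return 0;
	}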

arch/riscv/kvm/vcpu_sbi.c

@@ -127,8 +127,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	run->riscv_sbi.args[3] = cp->a3;
 	run->riscv_sbi.args[4] = cp->a4;
 	run->riscv_sbi.args[5] = cp->a5;
-	run->riscv_sbi.ret[0] = cp->a0;
-	run->riscv_sbi.ret[1] = cp->a1;
+	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
+	run->riscv_sbi.ret[1] = 0;
 }
 
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
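
Note: the hunk above is the "fix sbiret init before forwarding to userspace" change. The return pair is now seeded before the exit to userspace, so a VMM that never fills in run->riscv_sbi.ret hands the guest a proper SBI "not supported" error instead of an echo of its own a0/a1. A standalone sketch of the behavior; the struct is a stand-in for the kvm_run exit block, and SBI_ERR_NOT_SUPPORTED is -2 per the SBI spec:

	/* Seed the SBI return pair with a sane default before forwarding. */
	#include <stdio.h>

	#define SBI_ERR_NOT_SUPPORTED (-2L)

	struct riscv_sbi_exit {
		unsigned long extension_id, function_id;
		unsigned long args[6];
		unsigned long ret[2];   /* ret[0] = SBI error code, ret[1] = value */
	};

	static void forward_to_userspace(struct riscv_sbi_exit *exit,
					 const unsigned long a[6])
	{
		for (int i = 0; i < 6; i++)
			exit->args[i] = a[i];
		exit->ret[0] = (unsigned long)SBI_ERR_NOT_SUPPORTED; /* was: a[0] */
		exit->ret[1] = 0;                                    /* was: a[1] */
	}

	int main(void)
	{
		struct riscv_sbi_exit e = { 0 };
		unsigned long regs[6] = { 0xdead, 0xbeef, 0, 0, 0, 0 };

		forward_to_userspace(&e, regs);
		/* Guest sees -2/0 if userspace leaves ret untouched. */
		printf("ret[0]=%ld ret[1]=%lu\n", (long)e.ret[0], e.ret[1]);
		return 0;
	}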