
Merge branch 'topic/ppc-kvm' into next

Merge the powerpc KVM topic branch.
Michael Ellerman 2024-07-18 14:32:46 +10:00
commit 9ff0251b2e
14 changed files with 189 additions and 8 deletions

View File

@@ -546,7 +546,9 @@ table information.
+--------+-------+----+--------+----------------------------------+
| 0x1052 | 0x08  | RW | T      | CTRL                             |
+--------+-------+----+--------+----------------------------------+
| 0x1053-|       |    |        | Reserved                         |
| 0x1053 | 0x08  | RW | T      | DPDES                            |
+--------+-------+----+--------+----------------------------------+
| 0x1054-|       |    |        | Reserved                         |
| 0x1FFF |       |    |        |                                  |
+--------+-------+----+--------+----------------------------------+
| 0x2000 | 0x04  | RW | T      | CR                               |

View File

@@ -2439,8 +2439,11 @@ registers, find a list below:
PPC KVM_REG_PPC_PSSCR 64
PPC KVM_REG_PPC_DEC_EXPIRY 64
PPC KVM_REG_PPC_PTCR 64
PPC KVM_REG_PPC_HASHKEYR 64
PPC KVM_REG_PPC_HASHPKEYR 64
PPC KVM_REG_PPC_DAWR1 64
PPC KVM_REG_PPC_DAWRX1 64
PPC KVM_REG_PPC_DEXCR 64
PPC KVM_REG_PPC_TM_GPR0 64
...
PPC KVM_REG_PPC_TM_GPR31 64
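
For context, a minimal userspace sketch of reading one of the newly listed registers through the one-reg interface; this is illustrative only and not part of the merge. It assumes vcpu_fd is a vCPU file descriptor obtained via KVM_CREATE_VCPU, and it falls back to defining KVM_REG_PPC_DEXCR from the uapi value added later in this merge in case the installed headers predate it.

/* Illustrative sketch, not from this merge: read the new DEXCR register
 * from userspace with KVM_GET_ONE_REG. vcpu_fd is assumed to be a vCPU
 * file descriptor obtained via KVM_CREATE_VCPU. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef KVM_REG_PPC_DEXCR
/* Value taken from the uapi hunk in this merge, for older headers. */
#define KVM_REG_PPC_DEXCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc6)
#endif

static int get_dexcr(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_DEXCR,
		.addr = (uint64_t)(uintptr_t)val,
	};

	/* Fails if the running kernel does not know this register. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}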

View File

@@ -81,6 +81,7 @@
#define KVMPPC_GSID_HASHKEYR 0x1050
#define KVMPPC_GSID_HASHPKEYR 0x1051
#define KVMPPC_GSID_CTRL 0x1052
#define KVMPPC_GSID_DPDES 0x1053
#define KVMPPC_GSID_CR 0x2000
#define KVMPPC_GSID_PIDR 0x2001
@@ -110,7 +111,7 @@
#define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1)
#define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0)
#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL
#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_DPDES
#define KVMPPC_GSE_DW_REGS_COUNT \
(KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1)

View File

@@ -594,6 +594,7 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)

View File

@@ -684,6 +684,11 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa);
int kmvhv_counters_tracepoint_regfunc(void);
void kmvhv_counters_tracepoint_unregfunc(void);
int kvmhv_get_l2_counters_status(void);
void kvmhv_set_l2_counters_status(int cpu, bool status);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */

View File

@@ -599,6 +599,9 @@ struct kvm_vcpu_arch {
ulong dawrx0;
ulong dawr1;
ulong dawrx1;
ulong dexcr;
ulong hashkeyr;
ulong hashpkeyr;
ulong ciabr;
ulong cfar;
ulong ppr;

View File

@@ -62,7 +62,8 @@ struct lppaca {
u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */
u8 fpregs_in_use;
u8 pmcregs_in_use;
u8 reserved8[28];
u8 l2_counters_enable; /* Enable usage of counters for KVM guest */
u8 reserved8[27];
__be64 wait_state_cycles; /* Wait cycles for this proc */
u8 reserved9[28];
__be16 slb_count; /* # of SLBs to maintain */
@@ -92,9 +93,13 @@ struct lppaca {
/* cacheline 4-5 */
__be32 page_ins; /* CMO Hint - # page ins by OS */
u8 reserved12[148];
u8 reserved12[28];
volatile __be64 l1_to_l2_cs_tb;
volatile __be64 l2_to_l1_cs_tb;
volatile __be64 l2_runtime_tb;
u8 reserved13[96];
volatile __be64 dtl_idx; /* Dispatch Trace Log head index */
u8 reserved13[96];
u8 reserved14[96];
} ____cacheline_aligned;
#define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr)
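
The lppaca layout is shared with the hypervisor, so the new fields above are carved out of existing reserved padding rather than appended: l2_counters_enable takes one byte of the old reserved8[28] (1 + 27 == 28), and the three counter doublewords plus the new reserved13[96] replace the old reserved12[148] (28 + 3*8 + 96 == 148), which keeps the offset of dtl_idx and the later fields unchanged. A standalone, illustrative sanity check of that arithmetic (not part of the merge):

/* Illustrative only, not from this merge: the bytes used by the new
 * lppaca fields must exactly replace the reserved padding they were
 * carved from, so the offsets of later fields stay fixed. */
#include <assert.h>

/* reserved8[28] -> l2_counters_enable + reserved8[27] */
static_assert(1 + 27 == 28, "lppaca reserved8 byte budget");

/* reserved12[148] -> reserved12[28] + 3 x 8-byte counters + reserved13[96] */
static_assert(28 + 3 * 8 + 96 == 148, "lppaca reserved12 byte budget");

int main(void)
{
	return 0;
}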

View File

@@ -645,6 +645,9 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3)
#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4)
#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5)
#define KVM_REG_PPC_DEXCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc6)
#define KVM_REG_PPC_HASHKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc7)
#define KVM_REG_PPC_HASHPKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc8)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs

View File

@@ -2305,7 +2305,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
break;
case KVM_REG_PPC_SDAR:
*val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
*val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu));
break;
case KVM_REG_PPC_SIER:
*val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0));
@@ -2349,6 +2349,15 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_DAWRX1:
*val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu));
break;
case KVM_REG_PPC_DEXCR:
*val = get_reg_val(id, kvmppc_get_dexcr_hv(vcpu));
break;
case KVM_REG_PPC_HASHKEYR:
*val = get_reg_val(id, kvmppc_get_hashkeyr_hv(vcpu));
break;
case KVM_REG_PPC_HASHPKEYR:
*val = get_reg_val(id, kvmppc_get_hashpkeyr_hv(vcpu));
break;
case KVM_REG_PPC_CIABR:
*val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu));
break;
@@ -2540,7 +2549,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.mmcrs = set_reg_val(id, *val);
break;
case KVM_REG_PPC_MMCR3:
*val = get_reg_val(id, vcpu->arch.mmcr[3]);
kvmppc_set_mmcr_hv(vcpu, 3, set_reg_val(id, *val));
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
@@ -2592,6 +2601,15 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_DAWRX1:
kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
break;
case KVM_REG_PPC_DEXCR:
kvmppc_set_dexcr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_HASHKEYR:
kvmppc_set_hashkeyr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_HASHPKEYR:
kvmppc_set_hashpkeyr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_CIABR:
kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val));
/* Don't allow setting breakpoints in hypervisor code */
@@ -4108,6 +4126,77 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
	}
}

/* Helper functions for reading L2's stats from L1's VPA */
#ifdef CONFIG_PPC_PSERIES
static DEFINE_PER_CPU(u64, l1_to_l2_cs);
static DEFINE_PER_CPU(u64, l2_to_l1_cs);
static DEFINE_PER_CPU(u64, l2_runtime_agg);

int kvmhv_get_l2_counters_status(void)
{
	return firmware_has_feature(FW_FEATURE_LPAR) &&
		get_lppaca()->l2_counters_enable;
}

void kvmhv_set_l2_counters_status(int cpu, bool status)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return;

	if (status)
		lppaca_of(cpu).l2_counters_enable = 1;
	else
		lppaca_of(cpu).l2_counters_enable = 0;
}

int kmvhv_counters_tracepoint_regfunc(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		kvmhv_set_l2_counters_status(cpu, true);
	}
	return 0;
}

void kmvhv_counters_tracepoint_unregfunc(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		kvmhv_set_l2_counters_status(cpu, false);
	}
}

static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
{
	struct lppaca *lp = get_lppaca();
	u64 l1_to_l2_ns, l2_to_l1_ns, l2_runtime_ns;
	u64 *l1_to_l2_cs_ptr = this_cpu_ptr(&l1_to_l2_cs);
	u64 *l2_to_l1_cs_ptr = this_cpu_ptr(&l2_to_l1_cs);
	u64 *l2_runtime_agg_ptr = this_cpu_ptr(&l2_runtime_agg);

	l1_to_l2_ns = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb));
	l2_to_l1_ns = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb));
	l2_runtime_ns = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb));
	trace_kvmppc_vcpu_stats(vcpu, l1_to_l2_ns - *l1_to_l2_cs_ptr,
				l2_to_l1_ns - *l2_to_l1_cs_ptr,
				l2_runtime_ns - *l2_runtime_agg_ptr);
	*l1_to_l2_cs_ptr = l1_to_l2_ns;
	*l2_to_l1_cs_ptr = l2_to_l1_ns;
	*l2_runtime_agg_ptr = l2_runtime_ns;
}

#else
int kvmhv_get_l2_counters_status(void)
{
	return 0;
}

static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
{
}
#endif

static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
				     unsigned long lpcr, u64 *tb)
{
@@ -4116,6 +4205,11 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
	int trap;
	long rc;

	if (vcpu->arch.doorbell_request) {
		vcpu->arch.doorbell_request = 0;
		kvmppc_set_dpdes(vcpu, 1);
	}

	io = &vcpu->arch.nestedv2_io;

	msr = mfmsr();
@@ -4156,6 +4250,10 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
	timer_rearm_host_dec(*tb);

	/* Record context switch and guest_run_time data */
	if (kvmhv_get_l2_counters_status())
		do_trace_nested_cs_time(vcpu);

	return trap;
}
@@ -6519,6 +6617,7 @@ static void kvmppc_book3s_exit_hv(void)
module_init(kvmppc_book3s_init_hv);
module_exit(kvmppc_book3s_exit_hv);
MODULE_DESCRIPTION("KVM on Book3S (POWER8 and later) in hypervisor mode");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
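
The VPA counters read above are cumulative totals in timebase ticks, so do_trace_nested_cs_time() keeps a per-CPU copy of the last values it reported and emits only the delta accumulated since the previous guest exit on that CPU. A minimal, illustrative sketch of that cumulative-to-delta accounting (plain static variables stand in for the per-CPU state and the VPA fields; not part of the merge):

/* Illustrative sketch of the delta accounting used by
 * do_trace_nested_cs_time(); not kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint64_t last_l1_to_l2_ns;	/* stands in for the per-CPU copy */

/* Report only the time added since the previous call, given the new
 * cumulative total read from the (simulated) VPA. */
static void report_delta(uint64_t l1_to_l2_total_ns)
{
	printf("l1_to_l2_cs_time=%llu ns\n",
	       (unsigned long long)(l1_to_l2_total_ns - last_l1_to_l2_ns));
	last_l1_to_l2_ns = l1_to_l2_total_ns;
}

int main(void)
{
	report_delta(1000);	/* first exit: total 1000 ns -> delta 1000 */
	report_delta(1800);	/* second exit: delta 800 */
	return 0;
}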

View File

@@ -116,6 +116,9 @@ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dexcr, 64, KVMPPC_GSID_DEXCR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashkeyr, 64, KVMPPC_GSID_HASHKEYR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashpkeyr, 64, KVMPPC_GSID_HASHPKEYR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT)
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR)

View File

@@ -193,6 +193,15 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
case KVMPPC_GSID_DAWRX1:
rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
break;
case KVMPPC_GSID_DEXCR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dexcr);
break;
case KVMPPC_GSID_HASHKEYR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashkeyr);
break;
case KVMPPC_GSID_HASHPKEYR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashpkeyr);
break;
case KVMPPC_GSID_CIABR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
break;
@@ -311,6 +320,10 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.vcore->vtb);
break;
case KVMPPC_GSID_DPDES:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.vcore->dpdes);
break;
case KVMPPC_GSID_LPCR:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.vcore->lpcr);
@@ -441,6 +454,15 @@ static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
case KVMPPC_GSID_DAWRX1:
vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
break;
case KVMPPC_GSID_DEXCR:
vcpu->arch.dexcr = kvmppc_gse_get_u64(gse);
break;
case KVMPPC_GSID_HASHKEYR:
vcpu->arch.hashkeyr = kvmppc_gse_get_u64(gse);
break;
case KVMPPC_GSID_HASHPKEYR:
vcpu->arch.hashpkeyr = kvmppc_gse_get_u64(gse);
break;
case KVMPPC_GSID_CIABR:
vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
break;
@@ -543,6 +565,9 @@ static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
case KVMPPC_GSID_VTB:
vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
break;
case KVMPPC_GSID_DPDES:
vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse);
break;
case KVMPPC_GSID_LPCR:
vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
break;

View File

@@ -2111,6 +2111,7 @@ void kvmppc_book3s_exit_pr(void)
module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);
MODULE_DESCRIPTION("KVM on Book3S without using hypervisor mode");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");

View File

@@ -151,7 +151,7 @@ static void test_gs_bitmap(struct kunit *test)
i++;
}
for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) {
for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSE_DW_REGS_END; iden++) {
kvmppc_gsbm_set(&gsbm, iden);
kvmppc_gsbm_set(&gsbm1, iden);
KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
@@ -325,4 +325,5 @@ static struct kunit_suite guest_state_buffer_test_suite = {
kunit_test_suites(&guest_state_buffer_test_suite);
MODULE_DESCRIPTION("KUnit tests for Guest State Buffer APIs");
MODULE_LICENSE("GPL");

View File

@@ -512,6 +512,35 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
		__entry->vcpu_id, __entry->exit, __entry->ret)
);

#ifdef CONFIG_PPC_PSERIES
TRACE_EVENT_FN_COND(kvmppc_vcpu_stats,
	TP_PROTO(struct kvm_vcpu *vcpu, u64 l1_to_l2_cs, u64 l2_to_l1_cs, u64 l2_runtime),

	TP_ARGS(vcpu, l1_to_l2_cs, l2_to_l1_cs, l2_runtime),

	TP_CONDITION(l1_to_l2_cs || l2_to_l1_cs || l2_runtime),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u64, l1_to_l2_cs)
		__field(u64, l2_to_l1_cs)
		__field(u64, l2_runtime)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->l1_to_l2_cs = l1_to_l2_cs;
		__entry->l2_to_l1_cs = l2_to_l1_cs;
		__entry->l2_runtime = l2_runtime;
	),

	TP_printk("VCPU %d: l1_to_l2_cs_time=%llu ns l2_to_l1_cs_time=%llu ns l2_runtime=%llu ns",
		__entry->vcpu_id, __entry->l1_to_l2_cs,
		__entry->l2_to_l1_cs, __entry->l2_runtime),

	kmvhv_counters_tracepoint_regfunc, kmvhv_counters_tracepoint_unregfunc
);
#endif
#endif /* _TRACE_KVM_HV_H */
/* This part must be outside protection */