// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %lx\n", vcpu->arch.dec);
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/*
	 * Guest timebase ticks at the same frequency as host timebase.
	 * So use the host timebase calculations for decrementer emulation.
	 */
	dec_time = tb_to_ns(dec_time);
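	/*
	 * do_div() leaves the quotient (whole seconds) in dec_time and
	 * returns the remainder in nanoseconds, which is what ktime_set()
	 * below expects for its (secs, nsecs) arguments.
	 */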
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	vcpu->arch.dec_jiffies = get_tb();
}

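/*
 * Compute the guest's current DEC value at timebase @tb: the value last
 * written by the guest minus the timebase ticks elapsed since that write
 * (on BookE the result is clamped to zero rather than allowed to wrap).
 */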
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
	u64 jd = tb - vcpu->arch.dec_jiffies;

#ifdef CONFIG_BOOKE
	if (vcpu->arch.dec < jd)
		return 0;
#endif

	return vcpu->arch.dec - jd;
}

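/*
 * Emulate a guest mtspr: the SPRs that are emulated generically are
 * handled here; anything else is handed to the core-specific kvm_ops
 * backend.
 */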
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_SRR0:
		kvmppc_set_srr0(vcpu, spr_val);
		break;
	case SPRN_SRR1:
		kvmppc_set_srr1(vcpu, spr_val);
		break;

	/* XXX We need to context-switch the timebase for
	 * watchdog and FIT. */
	case SPRN_TBWL: break;
	case SPRN_TBWU: break;

	case SPRN_DEC:
		vcpu->arch.dec = (u32) spr_val;
		kvmppc_emulate_dec(vcpu);
		break;

	case SPRN_SPRG0:
		kvmppc_set_sprg0(vcpu, spr_val);
		break;
	case SPRN_SPRG1:
		kvmppc_set_sprg1(vcpu, spr_val);
		break;
	case SPRN_SPRG2:
		kvmppc_set_sprg2(vcpu, spr_val);
		break;
	case SPRN_SPRG3:
		kvmppc_set_sprg3(vcpu, spr_val);
		break;

	/* PIR can legally be written, but we ignore it */
	case SPRN_PIR: break;

	default:
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
								  spr_val);
		if (emulated == EMULATE_FAIL)
			printk(KERN_INFO "mtspr: unknown spr "
				"0x%x\n", sprn);
		break;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

	return emulated;
}

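/*
 * Emulate a guest mfspr: read the SPRs emulated generically here and pass
 * anything unknown to the core-specific kvm_ops backend. The result is
 * only written to the destination GPR if emulation succeeded.
 */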
static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = 0;

	switch (sprn) {
	case SPRN_SRR0:
		spr_val = kvmppc_get_srr0(vcpu);
		break;
	case SPRN_SRR1:
		spr_val = kvmppc_get_srr1(vcpu);
		break;
	case SPRN_PVR:
		spr_val = vcpu->arch.pvr;
		break;
	case SPRN_PIR:
		spr_val = vcpu->vcpu_id;
		break;

	/* Note: mftb and TBRL/TBWL are user-accessible, so
	 * the guest can always access the real TB anyways.
	 * In fact, we probably will never see these traps. */
	case SPRN_TBWL:
		spr_val = get_tb() >> 32;
		break;
	case SPRN_TBWU:
		spr_val = get_tb();
		break;

	case SPRN_SPRG0:
		spr_val = kvmppc_get_sprg0(vcpu);
		break;
	case SPRN_SPRG1:
		spr_val = kvmppc_get_sprg1(vcpu);
		break;
	case SPRN_SPRG2:
		spr_val = kvmppc_get_sprg2(vcpu);
		break;
	case SPRN_SPRG3:
		spr_val = kvmppc_get_sprg3(vcpu);
		break;
	/* Note: SPRG4-7 are user-readable, so we don't get
	 * a trap. */

	case SPRN_DEC:
		spr_val = kvmppc_get_dec(vcpu, get_tb());
		break;
	default:
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
								  &spr_val);
		if (unlikely(emulated == EMULATE_FAIL)) {
			printk(KERN_INFO "mfspr: unknown spr "
				"0x%x\n", sprn);
		}
		break;
	}

	if (emulated == EMULATE_DONE)
		kvmppc_set_gpr(vcpu, rt, spr_val);
	kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);

	return emulated;
}

/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
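/*
 * Top-level emulation entry point: fetch the last guest instruction,
 * decode it, and emulate the cases handled generically (traps, mfspr,
 * mtspr, tlbsync, the software breakpoint). Everything else falls back
 * to the core-specific emulate_op callback further down.
 */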
int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
{
	u32 inst;
	ppc_inst_t pinst;
	int rs, rt, sprn;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
	inst = ppc_inst_val(pinst);
	if (emulated != EMULATE_DONE)
		return emulated;

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	rs = get_rs(inst);
	rt = get_rt(inst);
	sprn = get_sprn(inst);

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case 0:
		/*
		 * Instruction with primary opcode 0. Based on PowerISA
		 * these are illegal instructions.
		 */
		if (inst == KVMPPC_INST_SW_BREAKPOINT) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			vcpu->run->debug.arch.status = 0;
			vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
			emulated = EMULATE_EXIT_USER;
			advance = 0;
		} else
			emulated = EMULATE_FAIL;

		break;

	default:
		emulated = EMULATE_FAIL;
	}

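	/*
	 * Anything not handled above is passed to the core-specific
	 * backend via the kvm_ops emulate_op callback.
	 */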
	if (emulated == EMULATE_FAIL) {
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst,
							       &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	/*
	 * If this ever handles prefixed instructions, the 4
	 * will need to become ppc_inst_len(pinst) instead.
	 */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);