// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
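
/*
 * Facility checks used before emulating an FP/VSX/Altivec access: if the
 * guest has the facility disabled in its MSR, queue the corresponding
 * "facility unavailable" interrupt (propagating the SRR1 prefixed-instruction
 * flag) and let the guest enable the facility and retry, instead of emulating.
 */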
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	ppc_inst_t inst;
	enum emulation_result emulated = EMULATE_FAIL;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;
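
	/*
	 * Reset the per-access MMIO emulation state. These fields tell the
	 * MMIO completion code how to copy data between the emulated access
	 * and the guest register(s) once the load/store has been performed,
	 * possibly after an exit to userspace.
	 */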
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;
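
	/*
	 * Decode the instruction with the generic PowerPC instruction
	 * analyser (analyse_instr() from sstep.c); the guest MSR is copied
	 * into regs.msr so the decode happens in the right context (e.g.
	 * 32- vs 64-bit mode).
	 */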
	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		vcpu->mmio_is_write = OP_IS_STORE(type);
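
		/*
		 * Each case below programs the vcpu->arch.mmio_* state and
		 * calls one of the kvmppc_handle_load()/kvmppc_handle_store()
		 * family of helpers, which either completes the access within
		 * the kernel or exits to userspace with KVM_EXIT_MMIO; the
		 * guest register update then happens when the access
		 * completes.
		 */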
		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;
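
			/*
			 * Index of the accessed element within the 16-byte
			 * aligned quadword, in units of the access size.
			 */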
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;
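
			/*
			 * mmio_copy_type selects how the loaded data is
			 * scattered into the target VSR: as 64-bit doublewords
			 * or 32-bit words, optionally splatted across all
			 * elements for the load-and-splat forms (e.g. lxvdsx).
			 */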
			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE: {
			int instr_byte_swap = op.type & BYTEREV;

			emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg),
					size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(vcpu,
					kvmppc_get_fpr(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
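
			/*
			 * Flush the guest's VMX registers to vcpu->arch so
			 * the store below reads the current values.
			 */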
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}
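
			/*
			 * As above, flush VSX state to vcpu->arch before
			 * reading the source register for the store.
			 */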
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

	trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
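	/* ppc_inst_len() is 8 bytes for a prefixed instruction, 4 otherwise. */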
	if (emulated != EMULATE_FAIL)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));

	return emulated;
}