1

x86/vmware: Use VMware hypercall API

Remove VMWARE_CMD macro and move to vmware_hypercall API.
No functional changes intended.

Use u32/u64 instead of uint32_t/uint64_t across the file.

Signed-off-by: Alexey Makhalov <alexey.makhalov@broadcom.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240613191650.9913-6-alexey.makhalov@broadcom.com
This commit is contained in:
Authored by Alexey Makhalov on 2024-06-13 12:16:47 -07:00; committed by Borislav Petkov (AMD)
parent 90328eaaff
commit b2c13c23ea

View File

@@ -49,54 +49,16 @@
#define STEALCLOCK_DISABLED 0
#define STEALCLOCK_ENABLED 1
#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
__asm__("inl (%%dx), %%eax" : \
"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
"a"(VMWARE_HYPERVISOR_MAGIC), \
"c"(VMWARE_CMD_##cmd), \
"d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) : \
"memory")
#define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx) \
__asm__("vmcall" : \
"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
"a"(VMWARE_HYPERVISOR_MAGIC), \
"c"(VMWARE_CMD_##cmd), \
"d"(0), "b"(UINT_MAX) : \
"memory")
#define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx) \
__asm__("vmmcall" : \
"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
"a"(VMWARE_HYPERVISOR_MAGIC), \
"c"(VMWARE_CMD_##cmd), \
"d"(0), "b"(UINT_MAX) : \
"memory")
#define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do { \
switch (vmware_hypercall_mode) { \
case CPUID_VMWARE_FEATURES_ECX_VMCALL: \
VMWARE_VMCALL(cmd, eax, ebx, ecx, edx); \
break; \
case CPUID_VMWARE_FEATURES_ECX_VMMCALL: \
VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx); \
break; \
default: \
VMWARE_PORT(cmd, eax, ebx, ecx, edx); \
break; \
} \
} while (0)
struct vmware_steal_time {
union {
uint64_t clock; /* stolen time counter in units of vtsc */
u64 clock; /* stolen time counter in units of vtsc */
struct {
/* only for little-endian */
uint32_t clock_low;
uint32_t clock_high;
u32 clock_low;
u32 clock_high;
};
};
uint64_t reserved[7];
u64 reserved[7];
};
static unsigned long vmware_tsc_khz __ro_after_init;
@@ -166,9 +128,10 @@ unsigned long vmware_hypercall_slow(unsigned long cmd,
static inline int __vmware_platform(void)
{
uint32_t eax, ebx, ecx, edx;
VMWARE_CMD(GETVERSION, eax, ebx, ecx, edx);
return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
u32 eax, ebx, ecx;
eax = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &ebx, &ecx);
return eax != UINT_MAX && ebx == VMWARE_HYPERVISOR_MAGIC;
}
static unsigned long vmware_get_tsc_khz(void)
@@ -220,21 +183,12 @@ static void __init vmware_cyc2ns_setup(void)
pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset);
}
static int vmware_cmd_stealclock(uint32_t arg1, uint32_t arg2)
static int vmware_cmd_stealclock(u32 addr_hi, u32 addr_lo)
{
uint32_t result, info;
u32 info;
asm volatile (VMWARE_HYPERCALL :
"=a"(result),
"=c"(info) :
"a"(VMWARE_HYPERVISOR_MAGIC),
"b"(0),
"c"(VMWARE_CMD_STEALCLOCK),
"d"(0),
"S"(arg1),
"D"(arg2) :
"memory");
return result;
return vmware_hypercall5(VMWARE_CMD_STEALCLOCK, 0, 0, addr_hi, addr_lo,
&info);
}
static bool stealclock_enable(phys_addr_t pa)
@@ -269,15 +223,15 @@ static bool vmware_is_stealclock_available(void)
* Return:
* The steal clock reading in ns.
*/
static uint64_t vmware_steal_clock(int cpu)
static u64 vmware_steal_clock(int cpu)
{
struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
uint64_t clock;
u64 clock;
if (IS_ENABLED(CONFIG_64BIT))
clock = READ_ONCE(steal->clock);
else {
uint32_t initial_high, low, high;
u32 initial_high, low, high;
do {
initial_high = READ_ONCE(steal->clock_high);
@@ -289,7 +243,7 @@ static uint64_t vmware_steal_clock(int cpu)
high = READ_ONCE(steal->clock_high);
} while (initial_high != high);
clock = ((uint64_t)high << 32) | low;
clock = ((u64)high << 32) | low;
}
return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul,
@@ -443,13 +397,13 @@ static void __init vmware_set_capabilities(void)
static void __init vmware_platform_setup(void)
{
uint32_t eax, ebx, ecx, edx;
uint64_t lpj, tsc_khz;
u32 eax, ebx, ecx;
u64 lpj, tsc_khz;
VMWARE_CMD(GETHZ, eax, ebx, ecx, edx);
eax = vmware_hypercall3(VMWARE_CMD_GETHZ, UINT_MAX, &ebx, &ecx);
if (ebx != UINT_MAX) {
lpj = tsc_khz = eax | (((uint64_t)ebx) << 32);
lpj = tsc_khz = eax | (((u64)ebx) << 32);
do_div(tsc_khz, 1000);
WARN_ON(tsc_khz >> 32);
pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
@@ -500,7 +454,7 @@ static u8 __init vmware_select_hypercall(void)
* If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
* intentionally defaults to 0.
*/
static uint32_t __init vmware_platform(void)
static u32 __init vmware_platform(void)
{
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
unsigned int eax;
@@ -528,8 +482,9 @@ static uint32_t __init vmware_platform(void)
/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
static bool __init vmware_legacy_x2apic_available(void)
{
uint32_t eax, ebx, ecx, edx;
VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
u32 eax;
eax = vmware_hypercall1(VMWARE_CMD_GETVCPU_INFO, 0);
return !(eax & BIT(VMWARE_CMD_VCPU_RESERVED)) &&
(eax & BIT(VMWARE_CMD_LEGACY_X2APIC));
}