
Merge tag 'kvmarm-fixes-6.12-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.12, take #3

- Stop wasting space in the HYP idmap, as we are dangerously close
  to the 4kB limit, and this has already exploded in -next

- Fix another race in vgic_init()

- Fix a UBSAN error when faking the cache topology with MTE
  enabled
commit e9001a382f
Author: Paolo Bonzini
Date:   2024-10-20 12:10:59 -04:00

6 changed files with 49 additions and 27 deletions

arch/arm64/include/asm/kvm_asm.h

@@ -178,6 +178,7 @@ struct kvm_nvhe_init_params {
 	unsigned long hcr_el2;
 	unsigned long vttbr;
 	unsigned long vtcr;
+	unsigned long tmp;
 };
 
 /*

arch/arm64/kernel/asm-offsets.c

@@ -146,6 +146,7 @@ int main(void)
 	DEFINE(NVHE_INIT_HCR_EL2,	offsetof(struct kvm_nvhe_init_params, hcr_el2));
 	DEFINE(NVHE_INIT_VTTBR,	offsetof(struct kvm_nvhe_init_params, vttbr));
 	DEFINE(NVHE_INIT_VTCR,	offsetof(struct kvm_nvhe_init_params, vtcr));
+	DEFINE(NVHE_INIT_TMP,	offsetof(struct kvm_nvhe_init_params, tmp));
 #endif
 #ifdef CONFIG_CPU_PM
 	DEFINE(CPU_CTX_SP,	offsetof(struct cpu_suspend_ctx, sp));
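For readers unfamiliar with asm-offsets.c: assembly cannot use offsetof(), so this generated table turns structure offsets into assembler-visible constants, which is what lets hyp-init.S address the new `tmp` field as `#NVHE_INIT_TMP`. A minimal sketch of the mechanism, with illustrative struct and symbol names rather than the kernel's actual build glue:

```c
/* Sketch of the asm-offsets mechanism with made-up names. The asm()
 * body is never executed; the compiler emits the ".ascii" directives
 * into its generated assembly, and a build script scrapes them into a
 * header of #defines that .S files can include.
 */
#include <stddef.h>

struct example_init_params {
	unsigned long tpidr_el2;
	unsigned long tmp;
};

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

int main(void)
{
	/* Ends up as "#define EXAMPLE_INIT_TMP 8" for assembly consumers,
	 * so "str lr, [x0, #EXAMPLE_INIT_TMP]" stays in sync with the C
	 * structure layout. */
	DEFINE(EXAMPLE_INIT_TMP, offsetof(struct example_init_params, tmp));
	return 0;
}
```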

arch/arm64/kvm/hyp/nvhe/hyp-init.S

@@ -24,28 +24,25 @@
 	.align	11
 
 SYM_CODE_START(__kvm_hyp_init)
-	ventry	__invalid		// Synchronous EL2t
-	ventry	__invalid		// IRQ EL2t
-	ventry	__invalid		// FIQ EL2t
-	ventry	__invalid		// Error EL2t
+	ventry	.			// Synchronous EL2t
+	ventry	.			// IRQ EL2t
+	ventry	.			// FIQ EL2t
+	ventry	.			// Error EL2t
 
-	ventry	__invalid		// Synchronous EL2h
-	ventry	__invalid		// IRQ EL2h
-	ventry	__invalid		// FIQ EL2h
-	ventry	__invalid		// Error EL2h
+	ventry	.			// Synchronous EL2h
+	ventry	.			// IRQ EL2h
+	ventry	.			// FIQ EL2h
+	ventry	.			// Error EL2h
 
 	ventry	__do_hyp_init		// Synchronous 64-bit EL1
-	ventry	__invalid		// IRQ 64-bit EL1
-	ventry	__invalid		// FIQ 64-bit EL1
-	ventry	__invalid		// Error 64-bit EL1
+	ventry	.			// IRQ 64-bit EL1
+	ventry	.			// FIQ 64-bit EL1
+	ventry	.			// Error 64-bit EL1
 
-	ventry	__invalid		// Synchronous 32-bit EL1
-	ventry	__invalid		// IRQ 32-bit EL1
-	ventry	__invalid		// FIQ 32-bit EL1
-	ventry	__invalid		// Error 32-bit EL1
-
-__invalid:
-	b	.
+	ventry	.			// Synchronous 32-bit EL1
+	ventry	.			// IRQ 32-bit EL1
+	ventry	.			// FIQ 32-bit EL1
+	ventry	.			// Error 32-bit EL1
 
 /*
  * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
@@ -76,6 +73,13 @@ __do_hyp_init:
 	eret
 SYM_CODE_END(__kvm_hyp_init)
 
+SYM_CODE_START_LOCAL(__kvm_init_el2_state)
+	/* Initialize EL2 CPU state to sane values. */
+	init_el2_state			// Clobbers x0..x2
+	finalise_el2_state
+	ret
+SYM_CODE_END(__kvm_init_el2_state)
+
 /*
  * Initialize the hypervisor in EL2.
  *
@@ -102,9 +106,12 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init)
 	// TPIDR_EL2 is used to preserve x0 across the macro maze...
 	isb
 	msr	tpidr_el2, x0
-	init_el2_state
-	finalise_el2_state
+	str	lr, [x0, #NVHE_INIT_TMP]
+
+	bl	__kvm_init_el2_state
+
 	mrs	x0, tpidr_el2
+	ldr	lr, [x0, #NVHE_INIT_TMP]
 
 1:
 	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
@@ -199,9 +206,8 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
 
 2:	msr	SPsel, #1			// We want to use SP_EL{1,2}
 
-	/* Initialize EL2 CPU state to sane values. */
-	init_el2_state				// Clobbers x0..x2
-	finalise_el2_state
+	bl	__kvm_init_el2_state
+
 	__init_el2_nvhe_prepare_eret
 
 	/* Enable MMU, set vectors and stack. */
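The idmap saving in this file comes from two changes: the fifteen unused vectors now park in place with `ventry .` instead of branching to a shared `__invalid` label, and the sizeable init_el2_state/finalise_el2_state macro expansions, previously inlined at two call sites, are factored into the single __kvm_init_el2_state subroutine, with lr stashed in the new `tmp` slot across the `bl`. The same macro-versus-function size tradeoff can be sketched in C (illustrative names, not kernel code):

```c
/* Illustrative C analogy for the hyp-init.S change: a body expanded
 * by a macro is duplicated at every call site, while a function body
 * is emitted once and shared by all callers.
 */
#define INIT_STATE_INLINE() \
	do { /* imagine dozens of sysreg writes here */ } while (0)

static void init_state_shared(void)	/* like __kvm_init_el2_state */
{
	/* the dozens of sysreg writes live here, exactly once */
}

void primary_boot_path(void)
{
	init_state_shared();		/* like "bl __kvm_init_el2_state" */
}

void secondary_boot_path(void)
{
	init_state_shared();		/* a second caller costs one call
					 * instruction, not a second copy
					 * of the whole body */
}
```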

arch/arm64/kvm/sys_regs.c

@@ -1994,7 +1994,7 @@ static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 * one cache line.
 	 */
 	if (kvm_has_mte(vcpu->kvm))
-		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);
+		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
 
 	__vcpu_sys_reg(vcpu, r->reg) = clidr;
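The UBSAN report fixed here is a shift-out-of-bounds: with MTE, the tagged cache type lands in CLIDR_EL1's Ttype fields, which sit above bit 32, and the literal `2` has type int, so shifting it that far is undefined behavior even though the result is assigned to a u64. A standalone demonstration (the shift value 33 is illustrative):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int shift = 33;   /* a field above bit 31, for illustration */
	uint64_t clidr = 0;

	/* clidr |= 2 << shift;       UB: '2' is a 32-bit int, and shifting
	 *                            an int by >= 32 is undefined, which is
	 *                            exactly what UBSAN reports. */
	clidr |= 2ULL << shift;    /* OK: the shift is performed in 64 bits */

	printf("clidr = %#llx\n", (unsigned long long)clidr);
	return 0;
}
```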

arch/arm64/kvm/vgic/vgic-init.c

@@ -544,14 +544,23 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	if (ret)
 		goto out;
 
-	dist->ready = true;
 	dist_base = dist->vgic_dist_base;
 	mutex_unlock(&kvm->arch.config_lock);
 
 	ret = vgic_register_dist_iodev(kvm, dist_base, type);
-	if (ret)
+	if (ret) {
 		kvm_err("Unable to register VGIC dist MMIO regions\n");
-
+		goto out_slots;
+	}
+
+	/*
+	 * kvm_io_bus_register_dev() guarantees all readers see the new MMIO
+	 * registration before returning through synchronize_srcu(), which also
+	 * implies a full memory barrier. As such, marking the distributor as
+	 * 'ready' here is guaranteed to be ordered after all vCPUs having seen
+	 * a completely configured distributor.
+	 */
+	dist->ready = true;
 	goto out_slots;
 out:
 	mutex_unlock(&kvm->arch.config_lock);
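The race being fixed: dist->ready was set before the distributor's MMIO device was registered, so a vCPU could observe ready == true and touch a half-configured distributor. The fix publishes ready only after kvm_io_bus_register_dev(), whose internal synchronize_srcu() provides the ordering the new comment describes. The general publish-after-initialization shape, as a simplified C sketch with stand-in names and a stand-in barrier, not the kernel's SRCU machinery:

```c
/* Simplified sketch of the ordering fix with stand-in names. */
struct dist_state {
	int configured;		/* stands in for the registered MMIO device */
	int ready;		/* readers must not use the dist until set */
};

/* Stand-in for vgic_register_dist_iodev()/kvm_io_bus_register_dev():
 * completes with a full memory barrier, as synchronize_srcu() does. */
static int register_iodev(struct dist_state *d)
{
	d->configured = 1;
	__sync_synchronize();	/* stand-in for the SRCU full barrier */
	return 0;
}

int map_resources(struct dist_state *d)
{
	if (register_iodev(d))
		return -1;	/* failed: never advertise readiness */

	/* Only now is it safe to publish: any reader that sees
	 * ready == 1 also sees configured == 1. */
	d->ready = 1;
	return 0;
}
```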

arch/arm64/kvm/vgic/vgic-kvm-device.c

@@ -236,7 +236,12 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 
 		mutex_lock(&dev->kvm->arch.config_lock);
 
-		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
+		/*
+		 * Either userspace has already configured NR_IRQS or
+		 * the vgic has already been initialized and vgic_init()
+		 * supplied a default amount of SPIs.
+		 */
+		if (dev->kvm->arch.vgic.nr_spis)
 			ret = -EBUSY;
 		else
 			dev->kvm->arch.vgic.nr_spis =