KVM: arm64: nv: Do not block when unmapping stage-2 if disallowed
Right now the nested code allows unmap operations on a shadow stage-2
to block unconditionally. This is wrong in a couple places, such as a
non-blocking MMU notifier or on the back of a sched_in() notifier as
part of shadow MMU recycling.

Carry through whether or not blocking is allowed to
kvm_pgtable_stage2_unmap(). This 'fixes' an issue where stage-2 MMU
reclaim would precipitate a stack overflow from a pile of
kvm_sched_in() callbacks, all trying to recycle a stage-2 MMU.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20241007233028.2236133-3-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 3c164eb946 (parent 6ded46b5a4)
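For context, a minimal standalone sketch (not the kernel code itself; names and values below are illustrative only) of the pattern the patch applies: the outermost unmap entry points decide whether blocking is permitted and thread that decision down to the innermost worker, so non-blocking contexts such as MMU notifiers or sched_in() callbacks never yield mid-teardown.

/*
 * Illustrative sketch of threading a may_block flag through an unmap
 * call chain. All function names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

/* Innermost worker: only allowed to reschedule when the caller says so. */
static void unmap_range(unsigned long start, unsigned long size, bool may_block)
{
	printf("unmap [%#lx, %#lx) %s\n", start, start + size,
	       may_block ? "(may reschedule)" : "(non-blocking)");
}

/* Blocking context, e.g. memslot teardown: rescheduling is fine. */
static void teardown_memslot(unsigned long base, unsigned long size)
{
	unmap_range(base, size, true);
}

/* Non-blocking context, e.g. shadow MMU recycling from a sched_in() callback. */
static void recycle_shadow_mmu(unsigned long base, unsigned long size)
{
	unmap_range(base, size, false);
}

int main(void)
{
	teardown_memslot(0x40000000UL, 0x200000UL);
	recycle_shadow_mmu(0x80000000UL, 0x200000UL);
	return 0;
}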
@@ -166,7 +166,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
 void __init free_hyp_pgds(void);
 
-void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
+void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
+			    u64 size, bool may_block);
 void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
 void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
 
@@ -124,7 +124,7 @@ extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
 				    struct kvm_s2_trans *trans);
 extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
 extern void kvm_nested_s2_wp(struct kvm *kvm);
-extern void kvm_nested_s2_unmap(struct kvm *kvm);
+extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block);
 extern void kvm_nested_s2_flush(struct kvm *kvm);
 
 unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);
@@ -328,9 +328,10 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
 				   may_block));
 }
 
-void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
+			    u64 size, bool may_block)
 {
-	__unmap_stage2_range(mmu, start, size, true);
+	__unmap_stage2_range(mmu, start, size, may_block);
 }
 
 void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
@@ -1015,7 +1016,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
 
 		if (!(vma->vm_flags & VM_PFNMAP)) {
 			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
-			kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
+			kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, vm_end - vm_start, true);
 		}
 		hva = vm_end;
 	} while (hva < reg_end);
@@ -1042,7 +1043,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 	kvm_for_each_memslot(memslot, bkt, slots)
 		stage2_unmap_memslot(kvm, memslot);
 
-	kvm_nested_s2_unmap(kvm);
+	kvm_nested_s2_unmap(kvm, true);
 
 	write_unlock(&kvm->mmu_lock);
 	mmap_read_unlock(current->mm);
@@ -1912,7 +1913,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 			     (range->end - range->start) << PAGE_SHIFT,
 			     range->may_block);
 
-	kvm_nested_s2_unmap(kvm);
+	kvm_nested_s2_unmap(kvm, range->may_block);
 	return false;
 }
 
@@ -2179,8 +2180,8 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	phys_addr_t size = slot->npages << PAGE_SHIFT;
 
 	write_lock(&kvm->mmu_lock);
-	kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, size);
-	kvm_nested_s2_unmap(kvm);
+	kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, size, true);
+	kvm_nested_s2_unmap(kvm, true);
 	write_unlock(&kvm->mmu_lock);
 }
 
@@ -634,7 +634,7 @@ static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
 
 	/* Clear the old state */
 	if (kvm_s2_mmu_valid(s2_mmu))
-		kvm_stage2_unmap_range(s2_mmu, 0, kvm_phys_size(s2_mmu));
+		kvm_stage2_unmap_range(s2_mmu, 0, kvm_phys_size(s2_mmu), false);
 
 	/*
 	 * The virtual VMID (modulo CnP) will be used as a key when matching
@@ -745,7 +745,7 @@ void kvm_nested_s2_wp(struct kvm *kvm)
 	}
 }
 
-void kvm_nested_s2_unmap(struct kvm *kvm)
+void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
 {
 	int i;
 
@@ -755,7 +755,7 @@ void kvm_nested_s2_unmap(struct kvm *kvm)
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
 
 		if (kvm_s2_mmu_valid(mmu))
-			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu));
+			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
 	}
 }
 
@@ -2937,7 +2937,7 @@ static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
 	 * corresponding VMIDs.
 	 */
-	kvm_nested_s2_unmap(vcpu->kvm);
+	kvm_nested_s2_unmap(vcpu->kvm, true);
 
 	write_unlock(&vcpu->kvm->mmu_lock);
 
@@ -2989,7 +2989,7 @@ union tlbi_info {
 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
 			       const union tlbi_info *info)
 {
-	kvm_stage2_unmap_range(mmu, info->range.start, info->range.size);
+	kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
 }
 
 static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -3084,7 +3084,7 @@ static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
 	max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
 	base_addr &= ~(max_size - 1);
 
-	kvm_stage2_unmap_range(mmu, base_addr, max_size);
+	kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
 }
 
 static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,