KVM: s390: Refactor kvm_is_error_gpa() into kvm_is_gpa_in_memslot()
Rename kvm_is_error_gpa() to kvm_is_gpa_in_memslot() and invert the polarity accordingly in order to (a) free up kvm_is_error_gpa() to match with kvm_is_error_{hva,page}(), and (b) make it more obvious that the helper is doing a memslot lookup, i.e. not simply checking for INVALID_GPA.

No functional change intended.

Link: https://lore.kernel.org/r/20240215152916.1158-9-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 406c10962a
commit 9e7325acb3
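In practice, the polarity flip moves the negation to the call sites: code that used to bail out when kvm_is_error_gpa() returned true now bails out when kvm_is_gpa_in_memslot() returns false, as the hunks below show. A minimal before/after sketch of the call-site pattern (addr stands in for whatever guest physical address a caller is validating; the program-interrupt injection mirrors the s390 callers in this diff, it is not new API):

/* Before: the old name reads as an "error GPA" check. */
if (kvm_is_error_gpa(vcpu->kvm, addr))
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

/*
 * After: the helper now reports whether the GPA resolves through a
 * memslot, so the caller negates the result to keep the same behaviour.
 */
if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr))
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);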
arch/s390/kvm/diag.c
@@ -102,7 +102,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 	    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, parm.token_addr))
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	vcpu->arch.pfault_token = parm.token_addr;
arch/s390/kvm/gaccess.c
@@ -664,7 +664,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	case ASCE_TYPE_REGION1: {
 		union region1_table_entry rfte;
 
-		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 			return PGM_ADDRESSING;
 		if (deref_table(vcpu->kvm, ptr, &rfte.val))
 			return -EFAULT;
@@ -682,7 +682,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	case ASCE_TYPE_REGION2: {
 		union region2_table_entry rste;
 
-		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 			return PGM_ADDRESSING;
 		if (deref_table(vcpu->kvm, ptr, &rste.val))
 			return -EFAULT;
@@ -700,7 +700,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	case ASCE_TYPE_REGION3: {
 		union region3_table_entry rtte;
 
-		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 			return PGM_ADDRESSING;
 		if (deref_table(vcpu->kvm, ptr, &rtte.val))
 			return -EFAULT;
@@ -728,7 +728,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	case ASCE_TYPE_SEGMENT: {
 		union segment_table_entry ste;
 
-		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 			return PGM_ADDRESSING;
 		if (deref_table(vcpu->kvm, ptr, &ste.val))
 			return -EFAULT;
@@ -748,7 +748,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
 	}
 	}
-	if (kvm_is_error_gpa(vcpu->kvm, ptr))
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 		return PGM_ADDRESSING;
 	if (deref_table(vcpu->kvm, ptr, &pte.val))
 		return -EFAULT;
@@ -770,7 +770,7 @@ absolute_address:
 			*prot = PROT_TYPE_IEP;
 		return PGM_PROTECTION;
 	}
-	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr))
 		return PGM_ADDRESSING;
 	*gpa = raddr.addr;
 	return 0;
@@ -957,7 +957,7 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 				return rc;
 		} else {
 			gpa = kvm_s390_real_to_abs(vcpu, ga);
-			if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
+			if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
 				rc = PGM_ADDRESSING;
 				prot = PROT_NONE;
 			}
arch/s390/kvm/kvm-s390.c
@@ -2878,7 +2878,7 @@ static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 
-	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
+	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
 		r = PGM_ADDRESSING;
 		goto out_unlock;
 	}
@@ -2940,7 +2940,7 @@ static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *m
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 
-	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
+	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
 		r = PGM_ADDRESSING;
 		goto out_unlock;
 	}
arch/s390/kvm/priv.c
@@ -149,7 +149,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	 * first page, since address is 8k aligned and memory pieces are always
 	 * at least 1MB aligned and have at least a size of 1MB.
 	 */
-	if (kvm_is_error_gpa(vcpu->kvm, address))
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, address))
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	kvm_s390_set_prefix(vcpu, address);
@@ -464,7 +464,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
 	addr = kvm_s390_real_to_abs(vcpu, addr);
 
-	if (kvm_is_error_gpa(vcpu->kvm, addr))
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr))
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	/*
 	 * We don't expect errors on modern systems, and do not care
arch/s390/kvm/sigp.c
@@ -172,7 +172,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 	 * first page, since address is 8k aligned and memory pieces are always
 	 * at least 1MB aligned and have at least a size of 1MB.
 	 */
-	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, irq.u.prefix.address)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INVALID_PARAMETER;
 		return SIGP_CC_STATUS_STORED;
include/linux/kvm_host.h
@@ -1779,11 +1779,11 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
 	return (hpa_t)pfn << PAGE_SHIFT;
 }
 
-static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
 {
 	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
 
-	return kvm_is_error_hva(hva);
+	return !kvm_is_error_hva(hva);
 }
 
 static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)