mm: pass VMA instead of MM to follow_pte()
... and centralize the VM_IO/VM_PFNMAP sanity check in there. We'll now
also perform these sanity checks for direct follow_pte() invocations.

For generic_access_phys(), we might now check multiple times: nothing to
worry about, really.

Link: https://lkml.kernel.org/r/20240410155527.474777-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Sean Christopherson <seanjc@google.com> [KVM]
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Fei Li <fei1.li@intel.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Yonghua Huang <yonghua.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 29ae7d96d1 (parent 3d6586008f)
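For context, a minimal sketch of a caller using the reworked interface.
The helper name lookup_io_pfn and its locking placement are illustrative
assumptions, not part of this patch:

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Hedged sketch: translate a user virtual address in a VM_IO/VM_PFNMAP
 * mapping to a PFN using the new VMA-based follow_pte().
 */
static int lookup_io_pfn(struct mm_struct *mm, unsigned long addr,
			 unsigned long *pfn)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (!vma) {
		mmap_read_unlock(mm);
		return -EFAULT;
	}

	/*
	 * The VM_IO/VM_PFNMAP sanity check now lives inside follow_pte(),
	 * so callers no longer open-code it; other mappings fail -EINVAL.
	 */
	ret = follow_pte(vma, addr, &ptep, &ptl);
	if (!ret) {
		*pfn = pte_pfn(ptep_get(ptep));
		pte_unmap_unlock(ptep, ptl);
	}
	mmap_read_unlock(mm);
	return ret;
}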
arch/s390/pci/pci_mmio.c

@@ -169,7 +169,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 	if (!(vma->vm_flags & VM_WRITE))
 		goto out_unlock_mmap;
 
-	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
+	ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
 	if (ret)
 		goto out_unlock_mmap;
 
@@ -308,7 +308,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 	if (!(vma->vm_flags & VM_WRITE))
 		goto out_unlock_mmap;
 
-	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
+	ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
 	if (ret)
 		goto out_unlock_mmap;
 
arch/x86/mm/pat/memtype.c

@@ -954,10 +954,7 @@ static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
 	pte_t *ptep, pte;
 	spinlock_t *ptl;
 
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return -EINVAL;
-
-	if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl))
+	if (follow_pte(vma, vma->vm_start, &ptep, &ptl))
 		return -EINVAL;
 
 	pte = ptep_get(ptep);
drivers/vfio/vfio_iommu_type1.c

@@ -518,7 +518,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 	spinlock_t *ptl;
 	int ret;
 
-	ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
+	ret = follow_pte(vma, vaddr, &ptep, &ptl);
 	if (ret) {
 		bool unlocked = false;
 
@@ -532,7 +532,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 		if (ret)
 			return ret;
 
-		ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
+		ret = follow_pte(vma, vaddr, &ptep, &ptl);
 		if (ret)
 			return ret;
 	}
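These two hunks sit in vfio's follow_fault_pfn(). A hedged sketch of the
fault-and-retry idiom around the new signature; names are hypothetical,
error handling is simplified, and the write-permission check on the
resulting PTE is omitted:

#include <linux/mm.h>

/* Assumes the mmap lock is held for read. */
static int sketch_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
			    unsigned long vaddr, unsigned long *pfn,
			    bool write_fault)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	ret = follow_pte(vma, vaddr, &ptep, &ptl);
	if (ret) {
		bool unlocked = false;

		/* No usable PTE yet: fault the page in, then retry. */
		ret = fixup_user_fault(mm, vaddr,
				       FAULT_FLAG_REMOTE |
				       (write_fault ? FAULT_FLAG_WRITE : 0),
				       &unlocked);
		if (unlocked)
			return -EAGAIN;	/* mmap lock was dropped; caller retries */
		if (ret)
			return ret;

		ret = follow_pte(vma, vaddr, &ptep, &ptl);
		if (ret)
			return ret;
	}

	*pfn = pte_pfn(ptep_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}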
drivers/virt/acrn/mm.c

@@ -187,8 +187,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
 	}
 
 	for (i = 0; i < nr_pages; i++) {
-		ret = follow_pte(vma->vm_mm,
-				 memmap->vma_base + i * PAGE_SIZE,
-				 &ptep, &ptl);
+		ret = follow_pte(vma, memmap->vma_base + i * PAGE_SIZE,
+				 &ptep, &ptl);
 		if (ret)
 			break;
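The acrn hunk walks a range one page at a time. A hedged sketch of that
per-page translation loop; collect_pfns is a hypothetical helper, and the
caller is assumed to hold the mmap lock for read and to have verified
that the whole range lies within @vma:

#include <linux/mm.h>

static int collect_pfns(struct vm_area_struct *vma, unsigned long base,
			unsigned long nr, unsigned long *pfns)
{
	spinlock_t *ptl;
	pte_t *ptep;
	unsigned long i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = follow_pte(vma, base + i * PAGE_SIZE, &ptep, &ptl);
		if (ret)
			return ret;
		pfns[i] = pte_pfn(ptep_get(ptep));
		pte_unmap_unlock(ptep, ptl);	/* drop the PTL between pages */
	}
	return 0;
}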
include/linux/mm.h

@@ -2420,7 +2420,7 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
 int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-int follow_pte(struct mm_struct *mm, unsigned long address,
+int follow_pte(struct vm_area_struct *vma, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write);
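For any out-of-tree caller tracking this prototype, the conversion is
mechanical; a hypothetical example (surrounding code assumed):

-	ret = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+	ret = follow_pte(vma, addr, &ptep, &ptl);

A caller that only holds an mm_struct now needs to look up the VMA first,
as in the sketch near the top of this page.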
mm/memory.c
@@ -5926,7 +5926,7 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 
 /**
  * follow_pte - look up PTE at a user virtual address
- * @mm: the mm_struct of the target address space
+ * @vma: the memory mapping
  * @address: user virtual address
  * @ptepp: location to store found PTE
 * @ptlp: location to store the lock for the PTE
@@ -5945,15 +5945,19 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 *
 * Return: zero on success, -ve otherwise.
 */
-int follow_pte(struct mm_struct *mm, unsigned long address,
+int follow_pte(struct vm_area_struct *vma, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *ptep;
 
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		goto out;
+
 	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 		goto out;
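With the check centralized at the top of follow_pte(), a lookup on an
ordinary mapping now fails up front even if a PTE is present. A minimal
sketch of the resulting semantics; demo_lookup is a hypothetical helper
and assumes the mmap lock is held for read:

#include <linux/mm.h>
#include <linux/printk.h>

static int demo_lookup(struct vm_area_struct *vma, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	ret = follow_pte(vma, addr, &ptep, &ptl);
	if (ret)
		return ret;	/* now -EINVAL for e.g. anonymous mappings */

	pr_info("pfn=0x%lx\n", pte_pfn(ptep_get(ptep)));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}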
@@ -6007,11 +6011,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 	int offset = offset_in_page(addr);
 	int ret = -EINVAL;
 
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return -EINVAL;
-
 retry:
-	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
+	if (follow_pte(vma, addr, &ptep, &ptl))
 		return -EINVAL;
 	pte = ptep_get(ptep);
 	pte_unmap_unlock(ptep, ptl);
@@ -6026,7 +6027,7 @@ retry:
 	if (!maddr)
 		return -ENOMEM;
 
-	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
+	if (follow_pte(vma, addr, &ptep, &ptl))
 		goto out_unmap;
 
 	if (!pte_same(pte, ptep_get(ptep))) {
virt/kvm/kvm_main.c

@@ -2902,7 +2902,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	int r;
 
-	r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+	r = follow_pte(vma, addr, &ptep, &ptl);
 	if (r) {
 		/*
 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does

@@ -2917,7 +2917,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 		if (r)
 			return r;
 
-		r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+		r = follow_pte(vma, addr, &ptep, &ptl);
 		if (r)
 			return r;
 	}