virt: acrn: stop using follow_pfn
Patch series "remove follow_pfn". This series open codes follow_pfn in the only remaining caller, although the code there remains questionable. It then also moves follow_phys into the only user and simplifies it a bit. This patch (of 3): Switch from follow_pfn to follow_pte so that we can get rid of follow_pfn. Note that this doesn't fix any of the pre-existing raciness and lack of permission checking in the code. Link: https://lkml.kernel.org/r/20240324234542.2038726-1-hch@lst.de Link: https://lkml.kernel.org/r/20240324234542.2038726-2-hch@lst.de Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: David Hildenbrand <david@redhat.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Fei Li <fei1.li@intel.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ingo Molnar <mingo@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in: parent 85109a8a9a · commit 1b265da7ea
@@ -172,18 +172,24 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)

 	mmap_read_lock(current->mm);
 	vma = vma_lookup(current->mm, memmap->vma_base);
 	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
+		spinlock_t *ptl;
+		pte_t *ptep;
+
 		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
 			mmap_read_unlock(current->mm);
 			return -EINVAL;
 		}

-		ret = follow_pfn(vma, memmap->vma_base, &pfn);
-		mmap_read_unlock(current->mm);
+		ret = follow_pte(vma->vm_mm, memmap->vma_base, &ptep, &ptl);
 		if (ret < 0) {
+			mmap_read_unlock(current->mm);
 			dev_dbg(acrn_dev.this_device,
 				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
 			return ret;
 		}
+		pfn = pte_pfn(ptep_get(ptep));
+		pte_unmap_unlock(ptep, ptl);
+		mmap_read_unlock(current->mm);

 		return acrn_mm_region_add(vm, memmap->user_vm_pa,
					  PFN_PHYS(pfn), memmap->len,
Loading…
Reference in New Issue
Block a user