
mm/mmap: reposition vma iterator in mmap_region()

Instead of moving (or leaving) the vma iterator pointing at the previous
vma, leave it pointing at the insert location.  Pointing the vma iterator
at the insert location allows for a cleaner walk of the vma tree for
the MAP_FIXED and no-expansion cases.
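
For illustration only, a minimal sketch (not part of the patch; the helper
name is hypothetical) of what "pointing at the insert location" means in
terms of the existing vma iterator primitives:

#include <linux/mm.h>

/*
 * position_at_insert() - hypothetical helper, for illustration only.
 * Leave @vmi covering the gap at the requested address rather than the
 * previous vma, mirroring the convention this patch adopts.
 */
static struct vm_area_struct *position_at_insert(struct vma_iterator *vmi,
						 struct vm_area_struct **nextp)
{
	struct vm_area_struct *prev;

	*nextp = vma_next(vmi);	/* vma following the gap, if any */
	prev = vma_prev(vmi);	/* iterator now rests on prev */

	/*
	 * Step forward one range so the iterator covers the gap itself,
	 * i.e. the insert location, instead of the previous vma.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	return prev;
}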

The vma_prev() call in the case of merging the previous vma is equivalent
to vma_iter_prev_range(), since the vma iterator will be pointing at the
insert location, which immediately follows the previous vma.
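
As an illustrative sketch of that equivalence (not from the patch; the
helper name is hypothetical), with the iterator parked on the gap that
directly follows prev, either primitive steps back onto the same vma:

#include <linux/mm.h>

/*
 * step_back_to_prev() - hypothetical helper, assumes @vmi currently
 * covers the gap at addr and that prev->vm_end == addr, as in the
 * prev-merge case of mmap_region().
 */
static struct vm_area_struct *step_back_to_prev(struct vma_iterator *vmi)
{
	struct vm_area_struct *prev, *again;

	prev = vma_prev(vmi);		/* what the patch uses */

	/* Redo the move via the other primitive; it lands on the same vma. */
	vma_iter_next_range(vmi);
	again = vma_iter_prev_range(vmi);
	VM_WARN_ON(again != prev);

	return prev;
}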

This change requires exporting abort_munmap_vmas() from mm/vma.c, so the
helper is moved into the mm/vma.h header for use by mmap_region().

Link: https://lkml.kernel.org/r/20240830040101.822209-12-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Bert Karwatzki <spasswolf@web.de>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: Jiri Olsa <olsajiri@gmail.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 3 files changed, 39 insertions(+), 33 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1401,21 +1401,23 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		/* Prepare to unmap any existing mapping in the area */
 		error = vms_gather_munmap_vmas(&vms, &mas_detach);
 		if (error)
-			return error;
+			goto gather_failed;
 
 		/* Remove any existing mappings from the vma tree */
-		if (vma_iter_clear_gfp(&vmi, addr, end, GFP_KERNEL))
-			return -ENOMEM;
+		error = vma_iter_clear_gfp(&vmi, addr, end, GFP_KERNEL);
+		if (error)
+			goto clear_tree_failed;
 
 		/* Unmap any existing mapping in the area */
 		vms_complete_munmap_vmas(&vms, &mas_detach);
 		next = vms.next;
 		prev = vms.prev;
-		vma_prev(&vmi);
 		vma = NULL;
 	} else {
 		next = vma_next(&vmi);
 		prev = vma_prev(&vmi);
+		if (prev)
+			vma_iter_next_range(&vmi);
 	}
 
 	/*
@@ -1428,11 +1430,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		vm_flags |= VM_ACCOUNT;
 	}
 
-	if (vm_flags & VM_SPECIAL) {
-		if (prev)
-			vma_iter_next_range(&vmi);
+	if (vm_flags & VM_SPECIAL)
 		goto cannot_expand;
-	}
 
 	/* Attempt to expand an old mapping */
 	/* Check next */
@@ -1453,19 +1452,21 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		merge_start = prev->vm_start;
 		vma = prev;
 		vm_pgoff = prev->vm_pgoff;
-	} else if (prev) {
-		vma_iter_next_range(&vmi);
+		vma_prev(&vmi); /* Equivalent to going to the previous range */
 	}
 
-	/* Actually expand, if possible */
-	if (vma &&
-	    !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
-		khugepaged_enter_vma(vma, vm_flags);
-		goto expanded;
+	if (vma) {
+		/* Actually expand, if possible */
+		if (!vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
+			khugepaged_enter_vma(vma, vm_flags);
+			goto expanded;
+		}
+
+		/* If the expand fails, then reposition the vma iterator */
+		if (unlikely(vma == prev))
+			vma_iter_set(&vmi, addr);
 	}
 
-	if (vma == prev)
-		vma_iter_set(&vmi, addr);
 cannot_expand:
 
 	/*
@@ -1624,6 +1625,11 @@ free_vma:
 unacct_error:
 	if (charged)
 		vm_unacct_memory(charged);
+
+clear_tree_failed:
+	if (vms.vma_count)
+		abort_munmap_vmas(&mas_detach);
+gather_failed:
 	validate_mm(mm);
 	return error;
 }

diff --git a/mm/vma.c b/mm/vma.c
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -646,22 +646,6 @@ again:
 		uprobe_mmap(vp->insert);
 }
 
-/*
- * abort_munmap_vmas - Undo any munmap work and free resources
- *
- * Reattach any detached vmas and free up the maple tree used to track the vmas.
- */
-static inline void abort_munmap_vmas(struct ma_state *mas_detach)
-{
-	struct vm_area_struct *vma;
-
-	mas_set(mas_detach, 0);
-	mas_for_each(mas_detach, vma, ULONG_MAX)
-		vma_mark_detached(vma, false);
-
-	__mt_destroy(mas_detach->tree);
-}
-
 /*
  * vms_complete_munmap_vmas() - Finish the munmap() operation
  * @vms: The vma munmap struct

diff --git a/mm/vma.h b/mm/vma.h
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -116,6 +116,22 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 		struct ma_state *mas_detach);
 
+/*
+ * abort_munmap_vmas - Undo any munmap work and free resources
+ *
+ * Reattach any detached vmas and free up the maple tree used to track the vmas.
+ */
+static inline void abort_munmap_vmas(struct ma_state *mas_detach)
+{
+	struct vm_area_struct *vma;
+
+	mas_set(mas_detach, 0);
+	mas_for_each(mas_detach, vma, ULONG_MAX)
+		vma_mark_detached(vma, false);
+
+	__mt_destroy(mas_detach->tree);
+}
+
 int
 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		  struct mm_struct *mm, unsigned long start,