mm/vma: track start and end for munmap in vma_munmap_struct
Set the start and end address for munmap when the prev and next are gathered. This is needed to avoid incorrect addresses being used during the vms_complete_munmap_vmas() function if the prev/next vma are expanded. Add a new helper vms_complete_pte_clear(), which is needed later and will avoid growing the argument list to unmap_region() beyond the 9 it already has. Link: https://lkml.kernel.org/r/20240830040101.822209-13-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Bert Karwatzki <spasswolf@web.de> Cc: Jeff Xu <jeffxu@chromium.org> Cc: Jiri Olsa <olsajiri@gmail.com> Cc: Kees Cook <kees@kernel.org> Cc: Lorenzo Stoakes <lstoakes@gmail.com> Cc: Mark Brown <broonie@kernel.org> Cc: Matthew Wilcox <willy@infradead.org> Cc: "Paul E. McKenney" <paulmck@kernel.org> Cc: Paul Moore <paul@paul-moore.com> Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
d744f4acb8
commit
9c3ebeda8f
32
mm/vma.c
32
mm/vma.c
@@ -646,6 +646,26 @@ again:
|
||||
uprobe_mmap(vp->insert);
|
||||
}
|
||||
|
||||
/*
 * vms_complete_pte_clear() - Clear page table entries and free page tables
 * for the already-detached VMAs described by @vms.
 * @vms: The vma munmap struct holding the range and gathered state
 * @mas_detach: Maple state tracking the detached VMAs
 * @mm_wr_locked: True if the mmap_lock is still held for write
 *
 * Replaces the direct unmap_region() call in vms_complete_munmap_vmas();
 * pulling this out keeps unmap_region()'s argument list from growing
 * beyond the 9 it already has (per the commit message).
 */
static void vms_complete_pte_clear(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	/*
	 * We can free page tables without write-locking mmap_lock because VMAs
	 * were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	lru_add_drain();
	tlb_gather_mmu(&tlb, vms->mm);
	update_hiwater_rss(vms->mm);
	/* Unmap only the aligned munmap range [vms->start, vms->end). */
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, vms->vma_count, mm_wr_locked);
	/* Rewind the maple state so free_pgtables() re-walks the detached VMAs. */
	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start, vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
}
|
||||
|
||||
/*
|
||||
* vms_complete_munmap_vmas() - Finish the munmap() operation
|
||||
* @vms: The vma munmap struct
|
||||
@@ -667,13 +687,7 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
|
||||
if (vms->unlock)
|
||||
mmap_write_downgrade(mm);
|
||||
|
||||
/*
|
||||
* We can free page tables without write-locking mmap_lock because VMAs
|
||||
* were isolated before we downgraded mmap_lock.
|
||||
*/
|
||||
mas_set(mas_detach, 1);
|
||||
unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
|
||||
vms->start, vms->end, vms->vma_count, !vms->unlock);
|
||||
vms_complete_pte_clear(vms, mas_detach, !vms->unlock);
|
||||
/* Update high watermark before we lower total_vm */
|
||||
update_hiwater_vm(mm);
|
||||
/* Stat accounting */
|
||||
@@ -745,6 +759,8 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
|
||||
goto start_split_failed;
|
||||
}
|
||||
vms->prev = vma_prev(vms->vmi);
|
||||
if (vms->prev)
|
||||
vms->unmap_start = vms->prev->vm_end;
|
||||
|
||||
/*
|
||||
* Detach a range of VMAs from the mm. Using next as a temp variable as
|
||||
@@ -805,6 +821,8 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
|
||||
}
|
||||
|
||||
vms->next = vma_next(vms->vmi);
|
||||
if (vms->next)
|
||||
vms->unmap_end = vms->next->vm_start;
|
||||
|
||||
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
|
||||
/* Make sure no VMAs are about to be lost. */
|
||||
|
6
mm/vma.h
6
mm/vma.h
@@ -38,6 +38,8 @@ struct vma_munmap_struct {
|
||||
struct list_head *uf; /* Userfaultfd list_head */
|
||||
unsigned long start; /* Aligned start addr (inclusive) */
|
||||
unsigned long end; /* Aligned end addr (exclusive) */
|
||||
unsigned long unmap_start; /* Unmap PTE start */
|
||||
unsigned long unmap_end; /* Unmap PTE end */
|
||||
int vma_count; /* Number of vmas that will be removed */
|
||||
unsigned long nr_pages; /* Number of pages being removed */
|
||||
unsigned long locked_vm; /* Number of locked pages */
|
||||
@@ -78,6 +80,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
|
||||
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
|
||||
unsigned long start, unsigned long end, pgoff_t pgoff);
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
/*
|
||||
* init_vma_munmap() - Initializer wrapper for vma_munmap_struct
|
||||
* @vms: The vma munmap struct
|
||||
@@ -108,7 +111,10 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
|
||||
vms->vma_count = 0;
|
||||
vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
|
||||
vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
|
||||
vms->unmap_start = FIRST_USER_ADDRESS;
|
||||
vms->unmap_end = USER_PGTABLES_CEILING;
|
||||
}
|
||||
#endif
|
||||
|
||||
int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
|
||||
struct ma_state *mas_detach);
|
||||
|
Loading…
Reference in New Issue
Block a user