
mm: rename vma_pgoff_address back to vma_address

With all callers converted, we can use the nice shorter name.  Take this
opportunity to reorder the arguments to the logical order (larger object
first).

Link: https://lkml.kernel.org/r/20240328225831.1765286-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: Matthew Wilcox (Oracle), 2024-03-28 22:58:29 +00:00
Committed by: Andrew Morton
Parent: 412ad5fbe9
Commit: e0abfbb671
4 changed files with 12 additions and 13 deletions
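
As a quick before/after of the calling convention (the vma, being the larger object, now comes first; the "addr" variable is only an illustrative placeholder, the real call sites are in the hunks below):

	addr = vma_pgoff_address(pgoff, nr_pages, vma);	/* old */
	addr = vma_address(vma, pgoff, nr_pages);	/* new */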

--- a/mm/internal.h
+++ b/mm/internal.h
@@ -805,17 +805,16 @@ void mlock_drain_remote(int cpu);
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 /**
- * vma_pgoff_address - Find the virtual address a page range is mapped at
+ * vma_address - Find the virtual address a page range is mapped at
+ * @vma: The vma which maps this object.
  * @pgoff: The page offset within its object.
  * @nr_pages: The number of pages to consider.
- * @vma: The vma which maps this object.
  *
  * If any page in this range is mapped by this VMA, return the first address
  * where any of these pages appear. Otherwise, return -EFAULT.
  */
-static inline unsigned long
-vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
-		struct vm_area_struct *vma)
+static inline unsigned long vma_address(struct vm_area_struct *vma,
+		pgoff_t pgoff, unsigned long nr_pages)
 {
 	unsigned long address;

--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -455,7 +455,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
 	tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
 	if (is_zone_device_page(p)) {
 		if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
-			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
+			tk->addr = vma_address(vma, fsdax_pgoff, 1);
 		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
 	} else
 		tk->size_shift = page_shift(compound_head(p));

--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -334,7 +334,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 		.flags = PVMW_SYNC,
 	};
-	pvmw.address = vma_pgoff_address(pgoff, 1, vma);
+	pvmw.address = vma_address(vma, pgoff, 1);
 	if (pvmw.address == -EFAULT)
 		return 0;
 	if (!page_vma_mapped_walk(&pvmw))

--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -794,7 +794,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 	/* The !page__anon_vma above handles KSM folios */
 	pgoff = folio->index + folio_page_idx(folio, page);
-	return vma_pgoff_address(pgoff, 1, vma);
+	return vma_address(vma, pgoff, 1);
 }
 /*
@@ -1132,7 +1132,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 	if (invalid_mkclean_vma(vma, NULL))
 		return 0;
-	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
+	pvmw.address = vma_address(vma, pgoff, nr_pages);
 	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
 	return page_vma_mkclean_one(&pvmw);
@@ -2592,8 +2592,8 @@ static void rmap_walk_anon(struct folio *folio,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 			pgoff_start, pgoff_end) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_pgoff_address(pgoff_start,
-				folio_nr_pages(folio), vma);
+		unsigned long address = vma_address(vma, pgoff_start,
+				folio_nr_pages(folio));
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
@@ -2654,8 +2654,8 @@ static void rmap_walk_file(struct folio *folio,
 lookup:
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
-		unsigned long address = vma_pgoff_address(pgoff_start,
-				folio_nr_pages(folio), vma);
+		unsigned long address = vma_address(vma, pgoff_start,
+				folio_nr_pages(folio));
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();