mm/rmap: remove page_remove_rmap()
All callers are gone, let's remove it and some leftover traces.

Link: https://lkml.kernel.org/r/20231220224504.646757-33-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 4d8f7418e8 (parent 5a0033f028)
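For any remaining out-of-tree caller, the conversion is mechanical: the
wrapper this commit deletes (see the mm/rmap.c hunk below) only dispatched
on its 'compound' argument. A minimal sketch mirroring that wrapper;
example_remove_rmap() is a hypothetical name, not part of the kernel API:

	/* Sketch: what a former page_remove_rmap(page, vma, compound) call becomes. */
	static void example_remove_rmap(struct page *page,
			struct vm_area_struct *vma, bool compound)
	{
		struct folio *folio = page_folio(page);

		if (likely(!compound))
			/* page is mapped by a single PTE */
			folio_remove_rmap_pte(folio, page, vma);
		else
			/* page is mapped by a PMD (THP) */
			folio_remove_rmap_pmd(folio, page, vma);
	}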
4	include/linux/rmap.h

@@ -241,8 +241,6 @@ void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
 	folio_add_file_rmap_ptes(folio, page, 1, vma)
 void folio_add_file_rmap_pmd(struct folio *, struct page *,
 		struct vm_area_struct *);
-void page_remove_rmap(struct page *, struct vm_area_struct *,
-		bool compound);
 void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
 		struct vm_area_struct *);
 #define folio_remove_rmap_pte(folio, page, vma) \
@@ -389,7 +387,7 @@ dup:
  *
  * This is similar to page_try_dup_anon_rmap(), however, not used during fork()
  * to duplicate a mapping, but instead to prepare for KSM or temporarily
- * unmapping a page (swap, migration) via page_remove_rmap().
+ * unmapping a page (swap, migration) via folio_remove_rmap_*().
  *
  * Marking the page shared can only fail if the page may be pinned; device
  * private pages cannot get pinned and consequently this function cannot fail.
10	mm/filemap.c

@@ -113,11 +113,11 @@
  *    ->i_pages lock		(try_to_unmap_one)
  *    ->lruvec->lru_lock	(follow_page->mark_page_accessed)
  *    ->lruvec->lru_lock	(check_pte_range->isolate_lru_page)
- *    ->private_lock		(page_remove_rmap->set_page_dirty)
- *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
- *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
- *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
- *    ->memcg->move_lock	(page_remove_rmap->folio_memcg_lock)
+ *    ->private_lock		(folio_remove_rmap_pte->set_page_dirty)
+ *    ->i_pages lock		(folio_remove_rmap_pte->set_page_dirty)
+ *    bdi.wb->list_lock		(folio_remove_rmap_pte->set_page_dirty)
+ *    ->inode->i_lock		(folio_remove_rmap_pte->set_page_dirty)
+ *    ->memcg->move_lock	(folio_remove_rmap_pte->folio_memcg_lock)
  *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
  *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
  *    ->private_lock		(zap_pte_range->block_dirty_folio)
2	mm/internal.h

@@ -651,7 +651,7 @@ folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
  * under page table lock for the pte/pmd being added or removed.
  *
  * mlock is usually called at the end of page_add_*_rmap(), munlock at
- * the end of page_remove_rmap(); but new anon folios are managed by
+ * the end of folio_remove_rmap_*(); but new anon folios are managed by
  * folio_add_lru_vma() calling mlock_new_folio().
  */
 void mlock_folio(struct folio *folio);
4	mm/memory-failure.c

@@ -2315,8 +2315,8 @@ try_again:
 	 * We use page flags to determine what action should be taken, but
 	 * the flags can be modified by the error containment action.  One
 	 * example is an mlocked page, where PG_mlocked is cleared by
-	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
-	 * correctly, we save a copy of the page flags at this time.
+	 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
+	 * status correctly, we save a copy of the page flags at this time.
 	 */
 	page_flags = p->flags;
23	mm/rmap.c

@@ -470,7 +470,7 @@ void __init anon_vma_init(void)
 /*
  * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
  *
- * Since there is no serialization what so ever against page_remove_rmap()
+ * Since there is no serialization what so ever against folio_remove_rmap_*()
  * the best this function can do is return a refcount increased anon_vma
  * that might have been relevant to this page.
  *
@@ -487,7 +487,7 @@ void __init anon_vma_init(void)
  * [ something equivalent to page_mapped_in_vma() ].
  *
  * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
- * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
+ * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
  * if there is a mapcount, we can dereference the anon_vma after observing
  * those.
  *
@@ -1498,25 +1498,6 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
 #endif
 }
 
-/**
- * page_remove_rmap - take down pte mapping from a page
- * @page:	page to remove mapping from
- * @vma:	the vm area from which the mapping is removed
- * @compound:	uncharge the page as compound or small page
- *
- * The caller needs to hold the pte lock.
- */
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
-		bool compound)
-{
-	struct folio *folio = page_folio(page);
-
-	if (likely(!compound))
-		folio_remove_rmap_pte(folio, page, vma);
-	else
-		folio_remove_rmap_pmd(folio, page, vma);
-}
-
 static __always_inline void __folio_remove_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		enum rmap_level level)
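One thing the folio interface adds over the removed wrapper is batching:
folio_remove_rmap_ptes(), declared in the rmap.h hunk above, tears down a run
of consecutive PTE mappings of the same folio in one call, where old code
called page_remove_rmap() once per page. A hedged sketch of that usage;
example_remove_rmap_range() is a hypothetical helper, not a kernel function:

	/*
	 * Sketch: remove the rmap for 'nr' pages, page .. page + nr - 1, all
	 * mapped by consecutive PTEs and all belonging to the same folio.
	 */
	static void example_remove_rmap_range(struct folio *folio,
			struct page *page, int nr, struct vm_area_struct *vma)
	{
		/*
		 * Old interface, one call per page:
		 *   for (i = 0; i < nr; i++)
		 *           page_remove_rmap(page + i, vma, false);
		 */

		/* New interface, one batched call for the whole range: */
		folio_remove_rmap_ptes(folio, page, nr, vma);
	}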