
mm: refactor folio_undo_large_rmappable()

Folios of order <= 1 are never put on the deferred list.  The order check
was added to folio_undo_large_rmappable() by commit 8897277acf ("mm:
support order-1 folios in the page cache"), but callers already test for
small (order-0) folios before every call, so the same condition is checked
twice.  Keep only the folio_order() check inside the function.

In addition, move all of the checks into the header file so that no
function call is made at all for folios that are not large-rmappable or
whose deferred_list is empty.
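
Roughly, the shape of the change is the following (a sketch distilled from
the hunks below; __folio_undo_large_rmappable() stays out of line and keeps
the locked deferred_list handling):

	/* header: inline fast path, no function call for the common case */
	static inline void folio_undo_large_rmappable(struct folio *folio)
	{
		/* order-0/1 folios are never on the deferred list */
		if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
			return;

		/*
		 * Nobody can be adding the folio to the deferred list at
		 * this point, so an unlocked emptiness check is safe.
		 */
		if (data_race(list_empty(&folio->_deferred_list)))
			return;

		__folio_undo_large_rmappable(folio);	/* out-of-line slow path */
	}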

Link: https://lkml.kernel.org/r/20240521130315.46072-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -3258,22 +3258,11 @@ out:
 	return ret;
 }
 
-void folio_undo_large_rmappable(struct folio *folio)
+void __folio_undo_large_rmappable(struct folio *folio)
 {
 	struct deferred_split *ds_queue;
 	unsigned long flags;
 
-	if (folio_order(folio) <= 1)
-		return;
-
-	/*
-	 * At this point, there is no one trying to add the folio to
-	 * deferred_list. If folio is not in deferred_list, it's safe
-	 * to check without acquiring the split_queue_lock.
-	 */
-	if (data_race(list_empty(&folio->_deferred_list)))
-		return;
-
 	ds_queue = get_deferred_split_queue(folio);
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {

@@ -622,7 +622,22 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
-void folio_undo_large_rmappable(struct folio *folio);
+void __folio_undo_large_rmappable(struct folio *folio);
+
+static inline void folio_undo_large_rmappable(struct folio *folio)
+{
+	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+		return;
+
+	/*
+	 * At this point, there is no one trying to add the folio to
+	 * deferred_list. If folio is not in deferred_list, it's safe
+	 * to check without acquiring the split_queue_lock.
+	 */
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	__folio_undo_large_rmappable(folio);
+}
 
 static inline struct folio *page_rmappable_folio(struct page *page)
 {

@@ -2661,8 +2661,7 @@ void free_unref_folios(struct folio_batch *folios)
 		unsigned long pfn = folio_pfn(folio);
 		unsigned int order = folio_order(folio);
 
-		if (order > 0 && folio_test_large_rmappable(folio))
-			folio_undo_large_rmappable(folio);
+		folio_undo_large_rmappable(folio);
 		if (!free_pages_prepare(&folio->page, order))
 			continue;
 
 		/*

@@ -123,8 +123,7 @@ void __folio_put(struct folio *folio)
 	}
 
 	page_cache_release(folio);
-	if (folio_test_large(folio) && folio_test_large_rmappable(folio))
-		folio_undo_large_rmappable(folio);
+	folio_undo_large_rmappable(folio);
 	mem_cgroup_uncharge(folio);
 	free_unref_page(&folio->page, folio_order(folio));
 }
@@ -1002,10 +1001,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			free_huge_folio(folio);
 			continue;
 		}
-		if (folio_test_large(folio) &&
-		    folio_test_large_rmappable(folio))
-			folio_undo_large_rmappable(folio);
-
+		folio_undo_large_rmappable(folio);
 		__page_cache_release(folio, &lruvec, &flags);
 
 		if (j != i)

@@ -1439,9 +1439,7 @@ free_it:
 		 */
 		nr_reclaimed += nr_pages;
 
-		if (folio_test_large(folio) &&
-		    folio_test_large_rmappable(folio))
-			folio_undo_large_rmappable(folio);
+		folio_undo_large_rmappable(folio);
 		if (folio_batch_add(&free_folios, folio) == 0) {
 			mem_cgroup_uncharge_folios(&free_folios);
 			try_to_unmap_flush();
@@ -1848,9 +1846,7 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 		if (unlikely(folio_put_testzero(folio))) {
 			__folio_clear_lru_flags(folio);
 
-			if (folio_test_large(folio) &&
-			    folio_test_large_rmappable(folio))
-				folio_undo_large_rmappable(folio);
+			folio_undo_large_rmappable(folio);
 			if (folio_batch_add(&free_folios, folio) == 0) {
 				spin_unlock_irq(&lruvec->lru_lock);
 				mem_cgroup_uncharge_folios(&free_folios);