mm/hugetlb_vmemmap: don't synchronize_rcu() without HVO

hugetlb_vmemmap_optimize_folio() and hugetlb_vmemmap_restore_folio() are
wrappers meant to be called regardless of whether HVO is enabled.
Therefore, they should not call synchronize_rcu().  Otherwise, doing so
regresses use cases that do not enable HVO.

So move synchronize_rcu() to __hugetlb_vmemmap_optimize_folio() and
__hugetlb_vmemmap_restore_folio(), and call it once for each batch of
folios when HVO is enabled.
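
The once-per-batch pattern, in isolation (a minimal userspace sketch;
expensive_sync(), process_one() and DO_EXPENSIVE_SYNC are hypothetical
stand-ins for synchronize_rcu(), the per-folio helpers and
VMEMMAP_SYNCHRONIZE_RCU, not kernel API):

  #include <stdio.h>

  /* hypothetical stand-in for VMEMMAP_SYNCHRONIZE_RCU */
  #define DO_EXPENSIVE_SYNC	(1UL << 0)

  /* hypothetical stand-in for synchronize_rcu() */
  static void expensive_sync(void)
  {
  	puts("expensive sync (runs once per batch)");
  }

  /* analogue of the __hugetlb_vmemmap_*_folio() helpers: the helper
   * itself decides whether to pay for the sync, from caller flags */
  static void process_one(int item, unsigned long flags)
  {
  	if (flags & DO_EXPENSIVE_SYNC)
  		expensive_sync();
  	printf("processed item %d\n", item);
  }

  int main(void)
  {
  	unsigned long flags = DO_EXPENSIVE_SYNC;
  	int items[] = { 1, 2, 3 };

  	for (int i = 0; i < 3; i++) {
  		process_one(items[i], flags);
  		/* only need the expensive sync once per batch */
  		flags &= ~DO_EXPENSIVE_SYNC;
  	}
  	return 0;
  }

A single-item caller passes the flag unconditionally and pays once; a
batch caller pays only on the first item, which is exactly the shape of
the patch below.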

Link: https://lkml.kernel.org/r/20240719042503.2752316-1-yuzhao@google.com
Fixes: bd225530a4 ("mm/hugetlb_vmemmap: fix race with speculative PFN walkers")
Signed-off-by: Yu Zhao <yuzhao@google.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202407091001.1250ad4a-oliver.sang@intel.com
Reported-by: Janosch Frank <frankja@linux.ibm.com>
Tested-by: Marc Hartmayer <mhartmay@linux.ibm.com>
Acked-by: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -43,6 +43,8 @@ struct vmemmap_remap_walk {
 #define VMEMMAP_SPLIT_NO_TLB_FLUSH	BIT(0)
 /* Skip the TLB flush when we remap the PTE */
 #define VMEMMAP_REMAP_NO_TLB_FLUSH	BIT(1)
+/* synchronize_rcu() to avoid writes from page_ref_add_unless() */
+#define VMEMMAP_SYNCHRONIZE_RCU	BIT(2)
 	unsigned long	flags;
 };
@@ -457,6 +459,9 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
 	if (!folio_test_hugetlb_vmemmap_optimized(folio))
 		return 0;
 
+	if (flags & VMEMMAP_SYNCHRONIZE_RCU)
+		synchronize_rcu();
+
 	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
 	vmemmap_reuse	= vmemmap_start;
 	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;
@@ -489,10 +494,7 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
  */
 int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
 {
-	/* avoid writes from page_ref_add_unless() while unfolding vmemmap */
-	synchronize_rcu();
-
-	return __hugetlb_vmemmap_restore_folio(h, folio, 0);
+	return __hugetlb_vmemmap_restore_folio(h, folio, VMEMMAP_SYNCHRONIZE_RCU);
 }
 
 /**
@@ -515,14 +517,14 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
 	struct folio *folio, *t_folio;
 	long restored = 0;
 	long ret = 0;
-
-	/* avoid writes from page_ref_add_unless() while unfolding vmemmap */
-	synchronize_rcu();
+	unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
 
 	list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
 		if (folio_test_hugetlb_vmemmap_optimized(folio)) {
-			ret = __hugetlb_vmemmap_restore_folio(h, folio,
-							VMEMMAP_REMAP_NO_TLB_FLUSH);
+			ret = __hugetlb_vmemmap_restore_folio(h, folio, flags);
+			/* only need to synchronize_rcu() once for each batch */
+			flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
+
 			if (ret)
 				break;
 			restored++;
@@ -570,6 +572,9 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
 		return ret;
 
 	static_branch_inc(&hugetlb_optimize_vmemmap_key);
+
+	if (flags & VMEMMAP_SYNCHRONIZE_RCU)
+		synchronize_rcu();
 	/*
 	 * Very Subtle
 	 * If VMEMMAP_REMAP_NO_TLB_FLUSH is set, TLB flushing is not performed
@@ -617,10 +622,7 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
 {
 	LIST_HEAD(vmemmap_pages);
 
-	/* avoid writes from page_ref_add_unless() while folding vmemmap */
-	synchronize_rcu();
-
-	__hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, 0);
+	__hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, VMEMMAP_SYNCHRONIZE_RCU);
 	free_vmemmap_page_list(&vmemmap_pages);
 }
@@ -647,6 +649,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
 {
 	struct folio *folio;
 	LIST_HEAD(vmemmap_pages);
+	unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
 
 	list_for_each_entry(folio, folio_list, lru) {
 		int ret = hugetlb_vmemmap_split_folio(h, folio);
@@ -663,14 +666,12 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
 		flush_tlb_all();
 
-	/* avoid writes from page_ref_add_unless() while folding vmemmap */
-	synchronize_rcu();
-
 	list_for_each_entry(folio, folio_list, lru) {
 		int ret;
 
-		ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
-						       VMEMMAP_REMAP_NO_TLB_FLUSH);
+		ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
+		/* only need to synchronize_rcu() once for each batch */
+		flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
 
 		/*
 		 * Pages to be freed may have been accumulated.  If we
@@ -684,8 +685,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
 			flush_tlb_all();
 			free_vmemmap_page_list(&vmemmap_pages);
 			INIT_LIST_HEAD(&vmemmap_pages);
-			__hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
-							 VMEMMAP_REMAP_NO_TLB_FLUSH);
+			__hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
 		}
 	}
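
Why one synchronize_rcu() per batch is enough: it returns only after every
RCU read-side critical section that began before the call has completed, so
no pre-existing reader (here, a speculative PFN walker writing via
page_ref_add_unless()) can overlap the remaps that follow.  A minimal
userspace sketch of that guarantee, assuming liburcu is installed (compile
with -lurcu; an illustration of the RCU semantics, not the kernel code path):

  #include <pthread.h>
  #include <stdio.h>
  #include <urcu.h>	/* default (memb) flavor of userspace RCU */

  static void *reader(void *arg)
  {
  	(void)arg;
  	rcu_register_thread();	/* liburcu readers must register */
  	rcu_read_lock();
  	/* kernel analogue: page_ref_add_unless() on a struct page */
  	puts("reader: writing inside RCU critical section");
  	rcu_read_unlock();
  	rcu_unregister_thread();
  	return NULL;
  }

  int main(void)
  {
  	pthread_t t;

  	pthread_create(&t, NULL, reader, NULL);

  	/* Returns only after every critical section that started before
  	 * this call has finished; one call covers the whole batch of
  	 * remaps done afterwards. */
  	synchronize_rcu();
  	puts("writer: no pre-existing reader can still be writing");

  	pthread_join(t, NULL);
  	return 0;
  }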