
Merge tag 'mm-hotfixes-stable-2024-06-07-15-24' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "14 hotfixes, 6 of which are cc:stable.

  All except the nilfs2 fixes affect MM and all are singletons - see the
  changelogs for details"

* tag 'mm-hotfixes-stable-2024-06-07-15-24' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nilfs2: fix nilfs_empty_dir() misjudgment and long loop on I/O errors
  mm: fix xyz_noprof functions calling profiled functions
  codetag: avoid race at alloc_slab_obj_exts
  mm/hugetlb: do not call vma_add_reservation upon ENOMEM
  mm/ksm: fix ksm_zero_pages accounting
  mm/ksm: fix ksm_pages_scanned accounting
  kmsan: do not wipe out origin when doing partial unpoisoning
  vmalloc: check CONFIG_EXECMEM in is_vmalloc_or_module_addr()
  mm: page_alloc: fix highatomic typing in multi-block buddies
  nilfs2: fix potential kernel bug due to lack of writeback flag waiting
  memcg: remove the lockdep assert from __mod_objcg_mlstate()
  mm: arm64: fix the out-of-bounds issue in contpte_clear_young_dirty_ptes
  mm: huge_mm: fix undefined reference to `mthp_stats' for CONFIG_SYSFS=n
  mm: drop the 'anon_' prefix for swap-out mTHP counters
Commit dc772f8237 by Linus Torvalds, 2024-06-07 17:01:10 -07:00
21 changed files with 115 additions and 62 deletions

@@ -467,11 +467,11 @@ anon_fault_fallback_charge
     instead falls back to using huge pages with lower orders or
     small pages even though the allocation was successful.

-anon_swpout
+swpout
     is incremented every time a huge page is swapped out in one
     piece without splitting.

-anon_swpout_fallback
+swpout_fallback
     is incremented if a huge page has to be split before swapout.
     Usually because failed to allocate some continuous swap space
     for the huge page.
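
These counters live under the per-size mTHP stats directory in sysfs, so the rename above is user-visible. A minimal C sketch of reading the new name follows; the hugepages-64kB directory is only an assumed example, since the available hugepages-<size>kB entries depend on the running kernel and architecture:

/* read_mthp_swpout.c - print the per-size mTHP swpout counter (illustrative only) */
#include <stdio.h>

int main(void)
{
    /* Path as documented in transhuge.rst; hugepages-64kB is just an example size. */
    const char *path =
        "/sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/swpout";
    unsigned long long swpout;
    FILE *f = fopen(path, "r");

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fscanf(f, "%llu", &swpout) == 1)
        printf("swpout: %llu\n", swpout);
    fclose(f);
    return 0;
}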


@@ -376,7 +376,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
          * clearing access/dirty for the whole block.
          */
         unsigned long start = addr;
-        unsigned long end = start + nr;
+        unsigned long end = start + nr * PAGE_SIZE;

         if (pte_cont(__ptep_get(ptep + nr - 1)))
             end = ALIGN(end, CONT_PTE_SIZE);
@@ -386,7 +386,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
         ptep = contpte_align_down(ptep);
     }

-    __clear_young_dirty_ptes(vma, start, ptep, end - start, flags);
+    __clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
 }
 EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);


@@ -607,7 +607,7 @@ int nilfs_empty_dir(struct inode *inode)
         kaddr = nilfs_get_folio(inode, i, &folio);
         if (IS_ERR(kaddr))
-            continue;
+            return 0;

         de = (struct nilfs_dir_entry *)kaddr;
         kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);


@@ -1652,6 +1652,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
             if (bh->b_folio != bd_folio) {
                 if (bd_folio) {
                     folio_lock(bd_folio);
+                    folio_wait_writeback(bd_folio);
                     folio_clear_dirty_for_io(bd_folio);
                     folio_start_writeback(bd_folio);
                     folio_unlock(bd_folio);
@@ -1665,6 +1666,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
             if (bh == segbuf->sb_super_root) {
                 if (bh->b_folio != bd_folio) {
                     folio_lock(bd_folio);
+                    folio_wait_writeback(bd_folio);
                     folio_clear_dirty_for_io(bd_folio);
                     folio_start_writeback(bd_folio);
                     folio_unlock(bd_folio);
@@ -1681,6 +1683,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
     }
     if (bd_folio) {
         folio_lock(bd_folio);
+        folio_wait_writeback(bd_folio);
         folio_clear_dirty_for_io(bd_folio);
         folio_start_writeback(bd_folio);
         folio_unlock(bd_folio);


@@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
     mm = get_task_mm(task);
     if (mm) {
         seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
-        seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
+        seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
         seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
         seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
         mmput(mm);
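
The seq_printf calls above back /proc/<pid>/ksm_stat, so the switch to mm_ksm_zero_pages() is directly observable from userspace. A minimal sketch that dumps the file for the current process (assumes a kernel built with CONFIG_KSM; field names are taken from the handler above):

/* dump_ksm_stat.c - print /proc/self/ksm_stat line by line */
#include <stdio.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/proc/self/ksm_stat", "r");

    if (!f) {
        perror("fopen /proc/self/ksm_stat");
        return 1;
    }
    /* Expected fields: ksm_rmap_items, ksm_zero_pages,
     * ksm_merging_pages, ksm_process_profit. */
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}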


@@ -269,8 +269,8 @@ enum mthp_stat_item {
     MTHP_STAT_ANON_FAULT_ALLOC,
     MTHP_STAT_ANON_FAULT_FALLBACK,
     MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
-    MTHP_STAT_ANON_SWPOUT,
-    MTHP_STAT_ANON_SWPOUT_FALLBACK,
+    MTHP_STAT_SWPOUT,
+    MTHP_STAT_SWPOUT_FALLBACK,
     __MTHP_STAT_COUNT
 };
@@ -278,6 +278,7 @@ struct mthp_stat {
     unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
 };

+#ifdef CONFIG_SYSFS
 DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

 static inline void count_mthp_stat(int order, enum mthp_stat_item item)
@@ -287,6 +288,11 @@ static inline void count_mthp_stat(int order, enum mthp_stat_item item)
     this_cpu_inc(mthp_stats.stats[order][item]);
 }
+#else
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif

 #define transparent_hugepage_use_zero_page() \
     (transparent_hugepage_flags & \


@@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm);
  */
 #define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

-extern unsigned long ksm_zero_pages;
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+    atomic_long_inc(&ksm_zero_pages);
+    atomic_long_inc(&mm->ksm_zero_pages);
+}

 static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
     if (is_ksm_zero_pte(pte)) {
-        ksm_zero_pages--;
-        mm->ksm_zero_pages--;
+        atomic_long_dec(&ksm_zero_pages);
+        atomic_long_dec(&mm->ksm_zero_pages);
     }
 }

+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+    return atomic_long_read(&mm->ksm_zero_pages);
+}
+
 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
     if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))


@@ -985,7 +985,7 @@ struct mm_struct {
          * Represent how many empty pages are merged with kernel zero
          * pages when enabling KSM use_zero_pages.
          */
-        unsigned long ksm_zero_pages;
+        atomic_long_t ksm_zero_pages;
 #endif /* CONFIG_KSM */
 #ifdef CONFIG_LRU_GEN_WALKS_MMU
         struct {


@@ -1000,7 +1000,7 @@ struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
         do {
             cpuset_mems_cookie = read_mems_allowed_begin();
             n = cpuset_mem_spread_node();
-            folio = __folio_alloc_node(gfp, order, n);
+            folio = __folio_alloc_node_noprof(gfp, order, n);
         } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));

         return folio;


@@ -558,15 +558,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
-DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
-DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);

 static struct attribute *stats_attrs[] = {
     &anon_fault_alloc_attr.attr,
     &anon_fault_fallback_attr.attr,
     &anon_fault_fallback_charge_attr.attr,
-    &anon_swpout_attr.attr,
-    &anon_swpout_fallback_attr.attr,
+    &swpout_attr.attr,
+    &swpout_fallback_attr.attr,
     NULL,
 };


@@ -5768,8 +5768,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
          * do_exit() will not see it, and will keep the reservation
          * forever.
          */
-        if (adjust_reservation && vma_needs_reservation(h, vma, address))
-            vma_add_reservation(h, vma, address);
+        if (adjust_reservation) {
+            int rc = vma_needs_reservation(h, vma, address);
+
+            if (rc < 0)
+                /* Presumably allocate_file_region_entries failed
+                 * to allocate a file_region struct. Clear
+                 * hugetlb_restore_reserve so that global reserve
+                 * count will not be incremented by free_huge_folio.
+                 * Act as if we consumed the reservation.
+                 */
+                folio_clear_hugetlb_restore_reserve(page_folio(page));
+            else if (rc)
+                vma_add_reservation(h, vma, address);
+        }

         tlb_remove_page_size(tlb, page, huge_page_size(h));

         /*


@@ -196,8 +196,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
                                       u32 origin, bool checked)
 {
     u64 address = (u64)addr;
-    void *shadow_start;
-    u32 *origin_start;
+    u32 *shadow_start, *origin_start;
     size_t pad = 0;

     KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
@@ -225,8 +224,16 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
     origin_start =
         (u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);

-    for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
-        origin_start[i] = origin;
+    /*
+     * If the new origin is non-zero, assume that the shadow byte is also non-zero,
+     * and unconditionally overwrite the old origin slot.
+     * If the new origin is zero, overwrite the old origin slot iff the
+     * corresponding shadow slot is zero.
+     */
+    for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
+        if (origin || !shadow_start[i])
+            origin_start[i] = origin;
+    }
 }

 struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)


@@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
 static bool ksm_smart_scan = true;

 /* The number of zero pages which is placed by KSM */
-unsigned long ksm_zero_pages;
+atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);

 /* The number of pages that have been skipped due to "smart scanning" */
 static unsigned long ksm_pages_skipped;
@@ -1429,8 +1429,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
      * the dirty bit in zero page's PTE is set.
      */
     newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
-    ksm_zero_pages++;
-    mm->ksm_zero_pages++;
+    ksm_map_zero_page(mm);
     /*
      * We're replacing an anonymous page with a zero page, which is
      * not anonymous. We need to do proper accounting otherwise we
@@ -2754,18 +2753,16 @@ static void ksm_do_scan(unsigned int scan_npages)
 {
     struct ksm_rmap_item *rmap_item;
     struct page *page;
-    unsigned int npages = scan_npages;

-    while (npages-- && likely(!freezing(current))) {
+    while (scan_npages-- && likely(!freezing(current))) {
         cond_resched();
         rmap_item = scan_get_next_rmap_item(&page);
         if (!rmap_item)
             return;
         cmp_and_merge_page(page, rmap_item);
         put_page(page);
+        ksm_pages_scanned++;
     }
-
-    ksm_pages_scanned += scan_npages - npages;
 }

 static int ksmd_should_run(void)
@@ -3376,7 +3373,7 @@ static void wait_while_offlining(void)
 #ifdef CONFIG_PROC_FS
 long ksm_process_profit(struct mm_struct *mm)
 {
-    return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
+    return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
         mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
 }
 #endif /* CONFIG_PROC_FS */
@@ -3665,7 +3662,7 @@ KSM_ATTR_RO(pages_skipped);
 static ssize_t ksm_zero_pages_show(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
 {
-    return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
+    return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
 }
 KSM_ATTR_RO(ksm_zero_pages);
@@ -3674,7 +3671,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
 {
     long general_profit;

-    general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
+    general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
         ksm_rmap_items * sizeof(struct ksm_rmap_item);

     return sysfs_emit(buf, "%ld\n", general_profit);


@@ -3147,8 +3147,6 @@ static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
     struct mem_cgroup *memcg;
     struct lruvec *lruvec;

-    lockdep_assert_irqs_disabled();
-
     rcu_read_lock();
     memcg = obj_cgroup_memcg(objcg);
     lruvec = mem_cgroup_lruvec(memcg, pgdat);


@@ -273,7 +273,7 @@ mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
 {
     mempool_t *pool;

-    pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
+    pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
     if (!pool)
         return NULL;


@@ -1955,10 +1955,12 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 }

 /*
- * Reserve a pageblock for exclusive use of high-order atomic allocations if
- * there are no empty page blocks that contain a page with a suitable order
+ * Reserve the pageblock(s) surrounding an allocation request for
+ * exclusive use of high-order atomic allocations if there are no
+ * empty page blocks that contain a page with a suitable order
  */
-static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
+static void reserve_highatomic_pageblock(struct page *page, int order,
+                                         struct zone *zone)
 {
     int mt;
     unsigned long max_managed, flags;
@@ -1984,10 +1986,17 @@ static void reserve_highatomic_pageblock(struct page *page, int order,
     /* Yoink! */
     mt = get_pageblock_migratetype(page);
     /* Only reserve normal pageblocks (i.e., they can merge with others) */
-    if (migratetype_is_mergeable(mt))
-        if (move_freepages_block(zone, page, mt,
-                                 MIGRATE_HIGHATOMIC) != -1)
-            zone->nr_reserved_highatomic += pageblock_nr_pages;
+    if (!migratetype_is_mergeable(mt))
+        goto out_unlock;
+
+    if (order < pageblock_order) {
+        if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
+            goto out_unlock;
+        zone->nr_reserved_highatomic += pageblock_nr_pages;
+    } else {
+        change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
+        zone->nr_reserved_highatomic += 1 << order;
+    }

 out_unlock:
     spin_unlock_irqrestore(&zone->lock, flags);
@@ -1999,7 +2008,7 @@ out_unlock:
  * intense memory pressure but failed atomic allocations should be easier
  * to recover from than an OOM.
  *
- * If @force is true, try to unreserve a pageblock even though highatomic
+ * If @force is true, try to unreserve pageblocks even though highatomic
  * pageblock is exhausted.
  */
 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
@@ -2041,6 +2050,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
              * adjust the count once.
              */
             if (is_migrate_highatomic(mt)) {
+                unsigned long size;
                 /*
                  * It should never happen but changes to
                  * locking could inadvertently allow a per-cpu
@@ -2048,9 +2058,9 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
                  * while unreserving so be safe and watch for
                  * underflows.
                  */
-                zone->nr_reserved_highatomic -= min(
-                        pageblock_nr_pages,
-                        zone->nr_reserved_highatomic);
+                size = max(pageblock_nr_pages, 1UL << order);
+                size = min(size, zone->nr_reserved_highatomic);
+                zone->nr_reserved_highatomic -= size;
             }

             /*
@@ -2062,11 +2072,19 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
              * of pageblocks that cannot be completely freed
              * may increase.
              */
-            ret = move_freepages_block(zone, page, mt,
-                                       ac->migratetype);
+            if (order < pageblock_order)
+                ret = move_freepages_block(zone, page, mt,
+                                           ac->migratetype);
+            else {
+                move_to_free_list(page, zone, order, mt,
+                                  ac->migratetype);
+                change_pageblock_range(page, order,
+                                       ac->migratetype);
+                ret = 1;
+            }
             /*
-             * Reserving this block already succeeded, so this should
-             * not fail on zone boundaries.
+             * Reserving the block(s) already succeeded,
+             * so this should not fail on zone boundaries.
              */
             WARN_ON_ONCE(ret == -1);
             if (ret > 0) {
@@ -3406,7 +3424,7 @@ try_this_zone:
              * if the pageblock should be reserved for the future
              */
             if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
-                reserve_highatomic_pageblock(page, zone);
+                reserve_highatomic_pageblock(page, order, zone);

             return page;
         } else {


@@ -217,7 +217,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
         count_memcg_folio_events(folio, THP_SWPOUT, 1);
         count_vm_event(THP_SWPOUT);
     }
-    count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
+    count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
 #endif
     count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }


@@ -1952,7 +1952,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 #ifdef CONFIG_MEMCG
     new_exts |= MEMCG_DATA_OBJEXTS;
 #endif
-    old_exts = slab->obj_exts;
+    old_exts = READ_ONCE(slab->obj_exts);
     handle_failed_objexts_alloc(old_exts, vec, objects);
     if (new_slab) {
         /*
@@ -1961,7 +1961,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
          * be simply assigned.
          */
         slab->obj_exts = new_exts;
-    } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+    } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
+               cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
         /*
          * If the slab is already in use, somebody can allocate and
          * assign slabobj_exts in parallel. In this case the existing


@@ -705,7 +705,7 @@ void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flag
     if (oldsize >= newsize)
         return (void *)p;

-    newp = kvmalloc(newsize, flags);
+    newp = kvmalloc_noprof(newsize, flags);
     if (!newp)
         return NULL;
     memcpy(newp, p, oldsize);
@@ -726,7 +726,7 @@ void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
     if (unlikely(check_mul_overflow(n, size, &bytes)))
         return NULL;

-    return __vmalloc(bytes, flags);
+    return __vmalloc_noprof(bytes, flags);
 }
 EXPORT_SYMBOL(__vmalloc_array_noprof);
@@ -737,7 +737,7 @@ EXPORT_SYMBOL(__vmalloc_array_noprof);
  */
 void *vmalloc_array_noprof(size_t n, size_t size)
 {
-    return __vmalloc_array(n, size, GFP_KERNEL);
+    return __vmalloc_array_noprof(n, size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_array_noprof);
@@ -749,7 +749,7 @@ EXPORT_SYMBOL(vmalloc_array_noprof);
  */
 void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
 {
-    return __vmalloc_array(n, size, flags | __GFP_ZERO);
+    return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
 }
 EXPORT_SYMBOL(__vcalloc_noprof);
@@ -760,7 +760,7 @@ EXPORT_SYMBOL(__vcalloc_noprof);
  */
 void *vcalloc_noprof(size_t n, size_t size)
 {
-    return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
+    return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vcalloc_noprof);


@@ -722,7 +722,7 @@ int is_vmalloc_or_module_addr(const void *x)
      * and fall back on vmalloc() if that fails. Others
      * just put it in the vmalloc space.
      */
-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
     unsigned long addr = (unsigned long)kasan_reset_tag(x);
     if (addr >= MODULES_VADDR && addr < MODULES_END)
         return 1;


@@ -1227,7 +1227,7 @@ retry:
                                      THP_SWPOUT_FALLBACK, 1);
                 count_vm_event(THP_SWPOUT_FALLBACK);
             }
-            count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+            count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
 #endif
             if (!add_to_swap(folio))
                 goto activate_locked_split;