19 hotfixes. 13 are cc:stable.

There's a focus on fixes for the memfd_pin_folios() work which was
added into 6.11. Apart from that, the usual shower of singleton fixes.

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZvbhSAAKCRDdBJ7gKXxA
jp8CAP47txk2c+tBLggog2MkQamADY5l5MT6E3fYq3ghSiKtVQEAnqX3LiQJ02tB
o9LcPcVrM90QntpKrLP1CpWCVdR+zA8=
=e0QC
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2024-09-27-09-45' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "19 hotfixes. 13 are cc:stable.

  There's a focus on fixes for the memfd_pin_folios() work which was
  added into 6.11. Apart from that, the usual shower of singleton
  fixes"

* tag 'mm-hotfixes-stable-2024-09-27-09-45' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  ocfs2: fix uninit-value in ocfs2_get_block()
  zram: don't free statically defined names
  memory tiers: use default_dram_perf_ref_source in log message
  Revert "list: test: fix tests for list_cut_position()"
  kselftests: mm: fix wrong __NR_userfaultfd value
  compiler.h: specify correct attribute for .rodata..c_jump_table
  mm/damon/Kconfig: update DAMON doc URL
  mm: kfence: fix elapsed time for allocated/freed track
  ocfs2: fix deadlock in ocfs2_get_system_file_inode
  ocfs2: reserve space for inline xattr before attaching reflink tree
  mm: migrate: annotate data-race in migrate_folio_unmap()
  mm/hugetlb: simplify refs in memfd_alloc_folio
  mm/gup: fix memfd_pin_folios alloc race panic
  mm/gup: fix memfd_pin_folios hugetlb page allocation
  mm/hugetlb: fix memfd_pin_folios resv_huge_pages leak
  mm/hugetlb: fix memfd_pin_folios free_huge_pages leak
  mm/filemap: fix filemap_get_folios_contig THP panic
  mm: make SPLIT_PTE_PTLOCKS depend on SMP
  tools: fix shared radix-tree build
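For context on the memfd_pin_folios() thread of fixes: the function (its signature is visible in the mm/gup.c hunk below) pins the folios backing a range of a memfd so a driver can hold them, e.g. for DMA. A minimal usage sketch, assuming a kernel-module caller; pin_memfd_range() and the array size are illustrative, not part of this merge:

	/* Illustrative only: pin the folios backing [start, end] of a memfd. */
	#include <linux/mm.h>

	static long pin_memfd_range(struct file *memfd, loff_t start, loff_t end)
	{
		struct folio *folios[16];
		pgoff_t offset;	/* written by memfd_pin_folios(); see mm/gup.c */
		long nr;

		nr = memfd_pin_folios(memfd, start, end, folios,
				      ARRAY_SIZE(folios), &offset);
		if (nr < 0)
			return nr;	/* e.g. -EINVAL on a bad range */

		/* ... hand folios[0..nr-1] to the device ... */

		unpin_folios(folios, nr);	/* drop the pins when done */
		return nr;
	}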
commit eee280841e
drivers/block/zram/zram_drv.c
@@ -2115,8 +2115,10 @@ static void zram_destroy_comps(struct zram *zram)
 		zram->num_active_comps--;
 	}
 
-	for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
-		kfree(zram->comp_algs[prio]);
+	for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+		/* Do not free statically defined compression algorithms */
+		if (zram->comp_algs[prio] != default_compressor)
+			kfree(zram->comp_algs[prio]);
 		zram->comp_algs[prio] = NULL;
 	}
 
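The zram hunk above encodes an ownership rule: a comp_algs[] slot may hold either the statically defined default name or a kstrdup()'d user-supplied one, and only the latter may be passed to kfree(). A hedged sketch of that rule; set_alg()/destroy_alg() are hypothetical helpers, not zram's API, and the default value shown is a placeholder:

	static const char *default_compressor = "lzo-rle";	/* placeholder for the build-time default */

	static void set_alg(const char **slot, const char *name)
	{
		/* user-supplied names are duplicated onto the heap */
		*slot = name ? kstrdup(name, GFP_KERNEL) : default_compressor;
	}

	static void destroy_alg(const char **slot)
	{
		if (*slot != default_compressor)	/* never kfree() static storage */
			kfree(*slot);
		*slot = NULL;
	}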
fs/ocfs2/aops.c
@@ -156,9 +156,8 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
 	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
 					  &ext_flags);
 	if (err) {
-		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
-		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
-		     (unsigned long long)p_blkno);
+		mlog(ML_ERROR, "get_blocks() failed, inode: 0x%p, "
+		     "block: %llu\n", inode, (unsigned long long)iblock);
 		goto bail;
 	}
 
fs/ocfs2/extent_map.c
@@ -973,7 +973,13 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
 	}
 
 	while (done < nr) {
-		down_read(&OCFS2_I(inode)->ip_alloc_sem);
+		if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
+			rc = -EAGAIN;
+			mlog(ML_ERROR,
+			     "Inode #%llu ip_alloc_sem is temporarily unavailable\n",
+			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
+			break;
+		}
 		rc = ocfs2_extent_map_get_blocks(inode, v_block + done,
 						 &p_block, &p_count, NULL);
 		up_read(&OCFS2_I(inode)->ip_alloc_sem);
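The fix above swaps an unconditional down_read() for down_read_trylock(): per the commit subject, a path through ocfs2_get_system_file_inode() can reach ocfs2_read_virt_blocks() with ip_alloc_sem already held, so blocking here could deadlock, while failing with -EAGAIN lets the caller back off. The general shape of the pattern, as an illustrative sketch (sem and the function name are not from this commit):

	static int do_read_work_or_again(struct rw_semaphore *sem)
	{
		if (!down_read_trylock(sem))
			return -EAGAIN;	/* lock unavailable: back off, don't block */

		/* ... read-side critical section ... */

		up_read(sem);
		return 0;
	}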
fs/ocfs2/refcounttree.c
@@ -25,6 +25,7 @@
 #include "namei.h"
 #include "ocfs2_trace.h"
 #include "file.h"
+#include "symlink.h"
 
 #include <linux/bio.h>
 #include <linux/blkdev.h>
@@ -4148,8 +4149,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
 	int ret;
 	struct inode *inode = d_inode(old_dentry);
 	struct buffer_head *new_bh = NULL;
+	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 
-	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+	if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
 		ret = -EINVAL;
 		mlog_errno(ret);
 		goto out;
@@ -4175,6 +4177,26 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
 		goto out_unlock;
 	}
 
+	if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) &&
+	    (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
+		/*
+		 * Adjust extent record count to reserve space for extended attribute.
+		 * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
+		 */
+		struct ocfs2_inode_info *new_oi = OCFS2_I(new_inode);
+
+		if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
+		    !(ocfs2_inode_is_fast_symlink(new_inode))) {
+			struct ocfs2_dinode *new_di = (struct ocfs2_dinode *)new_bh->b_data;
+			struct ocfs2_dinode *old_di = (struct ocfs2_dinode *)old_bh->b_data;
+			struct ocfs2_extent_list *el = &new_di->id2.i_list;
+			int inline_size = le16_to_cpu(old_di->i_xattr_inline_size);
+
+			le16_add_cpu(&el->l_count, -(inline_size /
+					sizeof(struct ocfs2_extent_rec)));
+		}
+	}
+
 	ret = ocfs2_create_reflink_node(inode, old_bh,
 					new_inode, new_bh, preserve);
 	if (ret) {
@@ -4182,7 +4204,7 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
 		goto inode_unlock;
 	}
 
-	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
+	if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
 		ret = ocfs2_reflink_xattrs(inode, old_bh,
 					   new_inode, new_bh,
 					   preserve);
fs/ocfs2/xattr.c
@@ -6511,16 +6511,7 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
 	}
 
 	new_oi = OCFS2_I(args->new_inode);
-	/*
-	 * Adjust extent record count to reserve space for extended attribute.
-	 * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
-	 */
-	if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
-	    !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
-		struct ocfs2_extent_list *el = &new_di->id2.i_list;
-		le16_add_cpu(&el->l_count, -(inline_size /
-					sizeof(struct ocfs2_extent_rec)));
-	}
+
 	spin_lock(&new_oi->ip_lock);
 	new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
 	new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
include/linux/compiler.h
@@ -133,7 +133,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #define annotate_unreachable() __annotate_unreachable(__COUNTER__)
 
 /* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
+#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
 
 #else /* !CONFIG_OBJTOOL */
 #define annotate_reachable()
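The replacement define leans on a quoting trick: the escaped inner quotes make the assembler see explicit "a",@progbits section attributes, and the trailing '#' starts an assembler comment that swallows whatever flags the compiler appends after the section name, leaving the jump table in a genuinely read-only, allocatable section. A reduced illustration, assuming GCC on an ELF target (__demo_rodata and demo_table are hypothetical, not from this commit):

	/* Emits roughly: .section .rodata..demo,"a",@progbits # <compiler-added flags> */
	#define __demo_rodata \
		__attribute__((section(".rodata..demo,\"a\",@progbits #")))

	static const int demo_table[2] __demo_rodata = { 1, 2 };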
include/linux/hugetlb.h
@@ -692,6 +692,9 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask, gfp_t gfp_mask,
 				bool allow_alloc_fallback);
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+					  nodemask_t *nmask, gfp_t gfp_mask);
+
 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
 			pgoff_t idx);
 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
@@ -1059,6 +1062,13 @@ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 	return NULL;
 }
 
+static inline struct folio *
+alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+			    nodemask_t *nmask, gfp_t gfp_mask)
+{
+	return NULL;
+}
+
 static inline struct folio *
 alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 			nodemask_t *nmask, gfp_t gfp_mask,
lib/list-test.c
@@ -408,13 +408,10 @@ static void list_test_list_cut_position(struct kunit *test)
 
 	KUNIT_EXPECT_EQ(test, i, 2);
 
-	i = 0;
 	list_for_each(cur, &list1) {
 		KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
 		i++;
 	}
-
-	KUNIT_EXPECT_EQ(test, i, 1);
 }
 
 static void list_test_list_cut_before(struct kunit *test)
@@ -439,13 +436,10 @@ static void list_test_list_cut_before(struct kunit *test)
 
 	KUNIT_EXPECT_EQ(test, i, 1);
 
-	i = 0;
 	list_for_each(cur, &list1) {
 		KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
 		i++;
 	}
-
-	KUNIT_EXPECT_EQ(test, i, 2);
 }
 
 static void list_test_list_splice(struct kunit *test)
mm/Kconfig
@@ -595,6 +595,7 @@ config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
 config SPLIT_PTE_PTLOCKS
 	def_bool y
 	depends on MMU
+	depends on SMP
 	depends on NR_CPUS >= 4
 	depends on !ARM || CPU_CACHE_VIPT
 	depends on !PARISC || PA20
mm/damon/Kconfig
@@ -9,7 +9,7 @@ config DAMON
 	  access frequency of each memory region. The information can be useful
 	  for performance-centric DRAM level memory management.
 
-	  See https://damonitor.github.io/doc/html/latest-damon/index.html for
+	  See https://www.kernel.org/doc/html/latest/mm/damon/index.html for
 	  more information.
 
 config DAMON_KUNIT_TEST
mm/filemap.c
@@ -2196,6 +2196,10 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
 		if (xa_is_value(folio))
 			goto update_start;
 
+		/* If we landed in the middle of a THP, continue at its end. */
+		if (xa_is_sibling(folio))
+			goto update_start;
+
 		if (!folio_try_get(folio))
 			goto retry;
 
mm/gup.c
@@ -3700,6 +3700,7 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
 					ret = PTR_ERR(folio);
 					if (ret != -EEXIST)
 						goto err;
+					folio = NULL;
 				}
 			}
 		}
mm/hugetlb.c
@@ -2390,6 +2390,23 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
 	return folio;
 }
 
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+		nodemask_t *nmask, gfp_t gfp_mask)
+{
+	struct folio *folio;
+
+	spin_lock_irq(&hugetlb_lock);
+	folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
+					       nmask);
+	if (folio) {
+		VM_BUG_ON(!h->resv_huge_pages);
+		h->resv_huge_pages--;
+	}
+
+	spin_unlock_irq(&hugetlb_lock);
+	return folio;
+}
+
 /* folio migration callback function */
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 		nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
mm/kfence/report.c
@@ -109,7 +109,7 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
 	const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
 	u64 ts_sec = track->ts_nsec;
 	unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);
-	u64 interval_nsec = local_clock() - meta->alloc_track.ts_nsec;
+	u64 interval_nsec = local_clock() - track->ts_nsec;
 	unsigned long rem_interval_nsec = do_div(interval_nsec, NSEC_PER_SEC);
 
 	/* Timestamp matches printk timestamp format. */
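The one-line kfence fix rides on the do_div() idiom used on both sides of it: do_div() divides its u64 argument in place and returns the remainder, so the variable ends up holding whole seconds while the remainder keeps the sub-second part. A sketch of the same split (show_ts() is illustrative; the format string mirrors printk timestamps, as the context comment notes):

	static void show_ts(u64 ts_nsec)
	{
		u64 ts_sec = ts_nsec;
		/* do_div() divides in place and returns the remainder */
		unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);

		pr_info("[%5llu.%06lu]\n", ts_sec, rem_nsec / 1000);
	}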
mm/memfd.c
@@ -79,23 +79,25 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
 		 * alloc from. Also, the folio will be pinned for an indefinite
 		 * amount of time, so it is not expected to be migrated away.
 		 */
-		gfp_mask = htlb_alloc_mask(hstate_file(memfd));
-		gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE);
+		struct hstate *h = hstate_file(memfd);
 
-		folio = alloc_hugetlb_folio_nodemask(hstate_file(memfd),
-						     numa_node_id(),
-						     NULL,
-						     gfp_mask,
-						     false);
-		if (folio && folio_try_get(folio)) {
+		gfp_mask = htlb_alloc_mask(h);
+		gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE);
+		idx >>= huge_page_order(h);
+
+		folio = alloc_hugetlb_folio_reserve(h,
+						    numa_node_id(),
+						    NULL,
+						    gfp_mask);
+		if (folio) {
 			err = hugetlb_add_to_page_cache(folio,
 							memfd->f_mapping,
 							idx);
 			if (err) {
 				folio_put(folio);
-				free_huge_folio(folio);
 				return ERR_PTR(err);
 			}
+			folio_unlock(folio);
 			return folio;
 		}
 		return ERR_PTR(-ENOMEM);
mm/memory-tiers.c
@@ -768,10 +768,10 @@ int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
 		pr_info(
 "memory-tiers: the performance of DRAM node %d mismatches that of the reference\n"
 "DRAM node %d.\n", nid, default_dram_perf_ref_nid);
-		pr_info("  performance of reference DRAM node %d:\n",
-			default_dram_perf_ref_nid);
+		pr_info("  performance of reference DRAM node %d from %s:\n",
+			default_dram_perf_ref_nid, default_dram_perf_ref_source);
 		dump_hmem_attrs(&default_dram_perf, "    ");
-		pr_info("  performance of DRAM node %d:\n", nid);
+		pr_info("  performance of DRAM node %d from %s:\n", nid, source);
 		dump_hmem_attrs(perf, "    ");
 		pr_info(
 "  disable default DRAM node performance based abstract distance algorithm.\n");
mm/migrate.c
@@ -1196,7 +1196,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	int rc = -EAGAIN;
 	int old_page_state = 0;
 	struct anon_vma *anon_vma = NULL;
-	bool is_lru = !__folio_test_movable(src);
+	bool is_lru = data_race(!__folio_test_movable(src));
 	bool locked = false;
 	bool dst_locked = false;
 
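data_race() is the KCSAN annotation for a deliberately lockless access: it documents that a concurrent write is tolerated by design and silences the checker, while adding no atomicity or ordering. An illustrative use outside this hunk (shared_counter is hypothetical; data_race() itself comes from linux/compiler.h):

	static int shared_counter;	/* updated concurrently elsewhere */

	static int read_counter_racy(void)
	{
		/* a stale value is acceptable here; KCSAN will not report it */
		return data_race(shared_counter);
	}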
@@ -15,7 +15,7 @@
 #include <sys/ioctl.h>
 #include <sys/stat.h>
 #include <math.h>
-#include <asm-generic/unistd.h>
+#include <asm/unistd.h>
 #include <pthread.h>
 #include <sys/resource.h>
 #include <assert.h>
tools/testing/shared/maple-shared.h
@@ -1,4 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __MAPLE_SHARED_H__
+#define __MAPLE_SHARED_H__
 
 #define CONFIG_DEBUG_MAPLE_TREE
 #define CONFIG_MAPLE_SEARCH
@@ -7,3 +9,5 @@
 #include <stdlib.h>
 #include <time.h>
 #include "linux/init.h"
+
+#endif /* __MAPLE_SHARED_H__ */
tools/testing/shared/shared.h
@@ -1,4 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SHARED_H__
+#define __SHARED_H__
 
 #include <linux/types.h>
 #include <linux/bug.h>
@@ -31,3 +33,5 @@
 #ifndef dump_stack
 #define dump_stack() assert(0)
 #endif
+
+#endif /* __SHARED_H__ */
tools/testing/shared/shared.mk
@@ -15,7 +15,9 @@ SHARED_DEPS = Makefile ../shared/shared.mk ../shared/*.h generated/map-shift.h \
 	      ../../../include/linux/maple_tree.h \
 	      ../../../include/linux/radix-tree.h \
 	      ../../../lib/radix-tree.h \
-	      ../../../include/linux/idr.h
+	      ../../../include/linux/idr.h \
+	      ../../../lib/maple_tree.c \
+	      ../../../lib/test_maple_tree.c
 
 ifndef SHIFT
 SHIFT=3
tools/testing/shared/xarray-shared.h
@@ -1,4 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __XARRAY_SHARED_H__
+#define __XARRAY_SHARED_H__
 
 #define XA_DEBUG
 #include "shared.h"
+
+#endif /* __XARRAY_SHARED_H__ */