codetag: debug: introduce OBJEXTS_ALLOC_FAIL to mark failed slab_ext allocations
If slabobj_ext vector allocation for a slab object fails and later on it
succeeds for another object in the same slab, the slabobj_ext for the
original object will be NULL and will be flagged when
CONFIG_MEM_ALLOC_PROFILING_DEBUG is enabled.

Mark failed slabobj_ext vector allocations using a new objext_flags flag
stored in the lower bits of slab->obj_exts. When a new allocation succeeds,
mark all tag references in the same slabobj_ext vector as empty to avoid
the warnings raised by the CONFIG_MEM_ALLOC_PROFILING_DEBUG checks.

Link: https://lkml.kernel.org/r/20240321163705.3067592-36-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 09c46563ff
parent d224eb0287
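For orientation before the diff, here is a standalone userspace sketch of
the scheme the patch implements, assuming nothing beyond standard C. Every
name in it (fake_slab, obj_ext, ALLOC_FAIL_FLAG, alloc_exts) is an
illustrative stand-in, not the kernel API:

/*
 * Standalone sketch, not kernel code: one word per slab holds either the
 * metadata vector pointer or, in its low bit, a flag recording that a
 * previous vector allocation failed.
 */
#include <stdio.h>
#include <stdlib.h>

#define ALLOC_FAIL_FLAG 1UL		/* stand-in for OBJEXTS_ALLOC_FAIL */

struct obj_ext { int ref_empty; };	/* stand-in for struct slabobj_ext */

struct fake_slab {
	unsigned long obj_exts;		/* vector pointer | flag bits */
	unsigned int objects;
};

static int alloc_exts(struct fake_slab *slab, int simulate_failure)
{
	struct obj_ext *vec;
	unsigned int i;

	if (simulate_failure) {
		/* record that live objects now lack metadata */
		slab->obj_exts = ALLOC_FAIL_FLAG;
		return -1;
	}
	vec = calloc(slab->objects, sizeof(*vec));
	if (!vec)
		return -1;
	/*
	 * A prior failure means existing objects carry no tag reference;
	 * mark their slots empty so later debug checks stay quiet.
	 */
	if (slab->obj_exts & ALLOC_FAIL_FLAG)
		for (i = 0; i < slab->objects; i++)
			vec[i].ref_empty = 1;
	slab->obj_exts = (unsigned long)vec;
	return 0;
}

int main(void)
{
	struct fake_slab slab = { .obj_exts = 0, .objects = 4 };

	alloc_exts(&slab, 1);	/* first attempt fails, flag gets set */
	alloc_exts(&slab, 0);	/* retry succeeds, stale refs marked empty */
	printf("slot 0 marked empty: %d\n",
	       ((struct obj_ext *)slab.obj_exts)[0].ref_empty);
	return 0;
}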
include/linux/memcontrol.h

@@ -366,8 +366,10 @@ enum page_memcg_data_flags {
 #endif /* CONFIG_MEMCG */
 
 enum objext_flags {
+	/* slabobj_ext vector failed to allocate */
+	OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
 	/* the next bit after the last actual flag */
-	__NR_OBJEXTS_FLAGS = __FIRST_OBJEXT_FLAG,
+	__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
 };
 
 #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
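The hunk above widens the flag space by one bit, which is why
__NR_OBJEXTS_FLAGS moves from __FIRST_OBJEXT_FLAG to
(__FIRST_OBJEXT_FLAG << 1). A minimal sketch of how OBJEXTS_FLAGS_MASK then
splits the word, assuming for simplicity that __FIRST_OBJEXT_FLAG is bit 0
(in the kernel its position depends on the memcg data flags below it); all
macro names are stand-ins:

#include <assert.h>

#define FIRST_FLAG	1UL			/* stand-in for __FIRST_OBJEXT_FLAG */
#define ALLOC_FAIL	FIRST_FLAG		/* stand-in for OBJEXTS_ALLOC_FAIL */
#define NR_FLAGS	(FIRST_FLAG << 1)	/* stand-in for __NR_OBJEXTS_FLAGS */
#define FLAGS_MASK	(NR_FLAGS - 1)		/* stand-in for OBJEXTS_FLAGS_MASK */

int main(void)
{
	unsigned long vec  = 0x1000UL;		/* an aligned vector address */
	unsigned long word = vec | ALLOC_FAIL;	/* pointer and flag share one word */

	assert((word & ~FLAGS_MASK) == vec);	/* mask off flags to get the pointer */
	assert(word & ALLOC_FAIL);		/* test the failure flag */
	return 0;
}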
mm/slub.c (48 changed lines)
@@ -1891,9 +1891,33 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 	}
 }
 
+static inline void mark_failed_objexts_alloc(struct slab *slab)
+{
+	slab->obj_exts = OBJEXTS_ALLOC_FAIL;
+}
+
+static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
+			struct slabobj_ext *vec, unsigned int objects)
+{
+	/*
+	 * If vector previously failed to allocate then we have live
+	 * objects with no tag reference. Mark all references in this
+	 * vector as empty to avoid warnings later on.
+	 */
+	if (obj_exts & OBJEXTS_ALLOC_FAIL) {
+		unsigned int i;
+
+		for (i = 0; i < objects; i++)
+			set_codetag_empty(&vec[i].ref);
+	}
+}
+
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
+static inline void mark_failed_objexts_alloc(struct slab *slab) {}
+static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
+			struct slabobj_ext *vec, unsigned int objects) {}
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
@@ -1909,29 +1933,37 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 			       gfp_t gfp, bool new_slab)
 {
 	unsigned int objects = objs_per_slab(s, slab);
-	unsigned long obj_exts;
-	void *vec;
+	unsigned long new_exts;
+	unsigned long old_exts;
+	struct slabobj_ext *vec;
 
 	gfp &= ~OBJCGS_CLEAR_MASK;
 	/* Prevent recursive extension vector allocation */
 	gfp |= __GFP_NO_OBJ_EXT;
 	vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
 			   slab_nid(slab));
-	if (!vec)
+	if (!vec) {
+		/* Mark vectors which failed to allocate */
+		if (new_slab)
+			mark_failed_objexts_alloc(slab);
+
 		return -ENOMEM;
+	}
 
-	obj_exts = (unsigned long)vec;
+	new_exts = (unsigned long)vec;
 #ifdef CONFIG_MEMCG
-	obj_exts |= MEMCG_DATA_OBJEXTS;
+	new_exts |= MEMCG_DATA_OBJEXTS;
 #endif
+	old_exts = slab->obj_exts;
+	handle_failed_objexts_alloc(old_exts, vec, objects);
 	if (new_slab) {
 		/*
 		 * If the slab is brand new and nobody can yet access its
 		 * obj_exts, no synchronization is required and obj_exts can
 		 * be simply assigned.
 		 */
-		slab->obj_exts = obj_exts;
-	} else if (cmpxchg(&slab->obj_exts, 0, obj_exts)) {
+		slab->obj_exts = new_exts;
+	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
 		/*
 		 * If the slab is already in use, somebody can allocate and
 		 * assign slabobj_exts in parallel. In this case the existing
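The cmpxchg change in this hunk is the subtle part: the expected old value
can no longer be hard-coded zero, since a failed earlier attempt may have
left OBJEXTS_ALLOC_FAIL in slab->obj_exts. A rough userspace analogue of
that publication pattern, written with C11 atomics and illustrative names:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long obj_exts;	/* stand-in for slab->obj_exts */

/* Try to install new_exts; fail if another thread beat us to it. */
static int publish(unsigned long expected, unsigned long new_exts)
{
	/*
	 * Compare against the snapshotted old word (which may carry the
	 * failure flag), not against a hard-coded zero.
	 */
	if (!atomic_compare_exchange_strong(&obj_exts, &expected, new_exts))
		return -1;	/* lost the race; caller frees its own copy */
	return 0;
}

int main(void)
{
	unsigned long old;

	atomic_store(&obj_exts, 1UL);	/* pretend the failure flag was set */
	old = atomic_load(&obj_exts);
	printf("publish: %d\n", publish(old, 0x2000UL));
	return 0;
}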