
kasan: revert eviction of stack traces in generic mode

This partially reverts commits cc478e0b6b, 63b85ac56a, 08d7c94d96,
a414d4286f, and 773688a6cb to make use of variable-sized stack depot
records, since eviction of stack entries from the stack depot forces
fixed-sized stack records.  Care was taken to retain the code cleanups
introduced by the above commits.

Eviction was added to generic KASAN to alleviate the additional memory usage
caused by fixed-sized stack records, but even with eviction memory usage
remains higher than before.

With the re-introduction of variable-sized records for stack depot, we can
switch back to non-evictable stack records and return to the previous
performance and memory usage baseline.
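
For illustration only (not part of this patch), the difference boils down to
the depot flags passed when saving a stack trace.  The sketch below uses the
kasan_save_stack()/stack_depot_put() helpers visible in the diff; the function
name is made up for the example:

  static void stack_record_modes_sketch(gfp_t flags)
  {
  	depot_stack_handle_t evictable, persistent;

  	/*
  	 * Refcounted, evictable record: STACK_DEPOT_FLAG_GET takes a
  	 * reference that must later be dropped with stack_depot_put().
  	 * Supporting eviction is what forces fixed-sized stack records.
  	 */
  	evictable = kasan_save_stack(flags,
  			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
  	stack_depot_put(evictable);

  	/*
  	 * Persistent record: no reference is taken and no put is needed,
  	 * so the depot can store it in a variable-sized slot.  This is
  	 * what generic KASAN switches back to with this revert.
  	 */
  	persistent = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
  	(void)persistent;
  }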

Before (observed after a KASAN kernel boot):

  pools: 597
  refcounted_allocations: 17547
  refcounted_frees: 6477
  refcounted_in_use: 11070
  freelist_size: 3497
  persistent_count: 12163
  persistent_bytes: 1717008

After:

  pools: 319
  refcounted_allocations: 0
  refcounted_frees: 0
  refcounted_in_use: 0
  freelist_size: 0
  persistent_count: 29397
  persistent_bytes: 5183536

As can be seen from the counters, with a generic KASAN config, refcounted
allocations and evictions are no longer used.  Due to using variable-sized
records, I observe a reduction of 278 stack depot pools (saving 4448 KiB)
with my test setup.
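
(For reference, assuming 4 KiB pages and the default stack depot pool size of
4 pages: 278 pools * 16 KiB/pool = 4448 KiB, matching the saving quoted above.)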

Link: https://lkml.kernel.org/r/20240129100708.39460-2-elver@google.com
Fixes: cc478e0b6b ("kasan: avoid resetting aux_lock")
Fixes: 63b85ac56a ("kasan: stop leaking stack trace handles")
Fixes: 08d7c94d96 ("kasan: memset free track in qlink_free")
Fixes: a414d4286f ("kasan: handle concurrent kasan_record_aux_stack calls")
Fixes: 773688a6cb ("kasan: use stack_depot_put for Generic mode")
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Tested-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Marco Elver 2024-01-29 11:07:02 +01:00 committed by Andrew Morton
parent 31639fd6ce
commit 711d349174
4 changed files with 14 additions and 77 deletions

mm/kasan/common.c

@@ -65,8 +65,7 @@ void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
depot_stack_handle_t stack;
stack = kasan_save_stack(flags,
STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
kasan_set_track(track, stack);
}
@@ -266,10 +265,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
return true;
/*
* If the object is not put into quarantine, it will likely be quickly
* reallocated. Thus, release its metadata now.
* Note: Keep per-object metadata to allow KASAN print stack traces for
* use-after-free-before-realloc bugs.
*/
kasan_release_object_meta(cache, object);
/* Let slab put the object onto the freelist. */
return false;

mm/kasan/generic.c

@@ -485,16 +485,6 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
if (alloc_meta) {
/* Zero out alloc meta to mark it as invalid. */
__memset(alloc_meta, 0, sizeof(*alloc_meta));
/*
* Prepare the lock for saving auxiliary stack traces.
* Temporarily disable KASAN bug reporting to allow instrumented
* raw_spin_lock_init to access aux_lock, which resides inside
* of a redzone.
*/
kasan_disable_current();
raw_spin_lock_init(&alloc_meta->aux_lock);
kasan_enable_current();
}
/*
@@ -506,18 +496,8 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
/* Evict the stack traces from stack depot. */
stack_depot_put(meta->alloc_track.stack);
stack_depot_put(meta->aux_stack[0]);
stack_depot_put(meta->aux_stack[1]);
/*
* Zero out alloc meta to mark it as invalid but keep aux_lock
* initialized to avoid having to reinitialize it when another object
* is allocated in the same slot.
*/
__memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
__memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
/* Zero out alloc meta to mark it as invalid. */
__memset(meta, 0, sizeof(*meta));
}
static void release_free_meta(const void *object, struct kasan_free_meta *meta)
@@ -529,27 +509,10 @@ static void release_free_meta(const void *object, struct kasan_free_meta *meta)
if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
return;
/* Evict the stack trace from the stack depot. */
stack_depot_put(meta->free_track.stack);
/* Mark free meta as invalid. */
*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}
void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
{
struct kasan_alloc_meta *alloc_meta;
struct kasan_free_meta *free_meta;
alloc_meta = kasan_get_alloc_meta(cache, object);
if (alloc_meta)
release_alloc_meta(alloc_meta);
free_meta = kasan_get_free_meta(cache, object);
if (free_meta)
release_free_meta(object, free_meta);
}
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
struct kasan_cache *info = &cache->kasan_info;
@@ -574,8 +537,6 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_meta;
void *object;
depot_stack_handle_t new_handle, old_handle;
unsigned long flags;
if (is_kfence_address(addr) || !slab)
return;
@@ -586,33 +547,18 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
if (!alloc_meta)
return;
new_handle = kasan_save_stack(0, depot_flags);
/*
* Temporarily disable KASAN bug reporting to allow instrumented
* spinlock functions to access aux_lock, which resides inside of a
* redzone.
*/
kasan_disable_current();
raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
old_handle = alloc_meta->aux_stack[1];
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
alloc_meta->aux_stack[0] = new_handle;
raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
kasan_enable_current();
stack_depot_put(old_handle);
alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}
void kasan_record_aux_stack(void *addr)
{
return __kasan_record_aux_stack(addr,
STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}
void kasan_record_aux_stack_noalloc(void *addr)
{
return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
return __kasan_record_aux_stack(addr, 0);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -623,7 +569,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
if (!alloc_meta)
return;
/* Evict previous stack traces (might exist for krealloc or mempool). */
/* Invalidate previous stack traces (might exist for krealloc or mempool). */
release_alloc_meta(alloc_meta);
kasan_save_track(&alloc_meta->alloc_track, flags);
@@ -637,7 +583,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
if (!free_meta)
return;
/* Evict previous stack trace (might exist for mempool). */
/* Invalidate previous stack trace (might exist for mempool). */
release_free_meta(object, free_meta);
kasan_save_track(&free_meta->free_track, 0);

mm/kasan/kasan.h

@@ -6,7 +6,6 @@
#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -265,13 +264,6 @@ struct kasan_global {
struct kasan_alloc_meta {
struct kasan_track alloc_track;
/* Free track is stored in kasan_free_meta. */
/*
* aux_lock protects aux_stack from accesses from concurrent
* kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
* on RT kernels, as kasan_record_aux_stack_noalloc can be called from
* non-sleepable contexts.
*/
raw_spinlock_t aux_lock;
depot_stack_handle_t aux_stack[2];
};
@@ -398,10 +390,8 @@ struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object);
void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
void kasan_release_object_meta(struct kmem_cache *cache, const void *object);
#else
static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { }
#endif
depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);

mm/kasan/quarantine.c

@@ -145,7 +145,10 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
void *object = qlink_to_object(qlink, cache);
struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
kasan_release_object_meta(cache, object);
/*
* Note: Keep per-object metadata to allow KASAN print stack traces for
* use-after-free-before-realloc bugs.
*/
/*
* If init_on_free is enabled and KASAN's free metadata is stored in