[PATCH v4 21/22] kasan: use stack_depot_put for Generic mode

From: andrey.konovalov
Date: Mon Nov 20 2023 - 12:51:59 EST


From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

Evict alloc/free stack traces from the stack depot for Generic KASAN
once the corresponding object is evicted from the quarantine.

For auxiliary stack traces, evict the oldest stack trace once a new one
is saved (KASAN only keeps references to the last two).
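
The rotation pairs each new reference taken with STACK_DEPOT_FLAG_GET with
a put of the handle being dropped. A minimal sketch of the pattern, with the
field names taken from struct kasan_alloc_meta and the helper name purely
illustrative:

	/* Illustrative only: mirrors the rotation in __kasan_record_aux_stack(). */
	static void rotate_aux_stack(struct kasan_alloc_meta *alloc_meta,
				     depot_flags_t depot_flags)
	{
		/* Drop the reference to the older of the two kept handles. */
		stack_depot_put(alloc_meta->aux_stack[1]);
		/* Shift the newer handle down and save a new one, taking a reference. */
		alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
		alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
	}

Here depot_flags is assumed to include STACK_DEPOT_FLAG_GET, as in the
callers changed below.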

Also evict all saved stack traces on krealloc.

To avoid double-evicting and mis-evicting stack traces (in case KASAN's
metadata was corrupted), reset KASAN's per-object metadata that stores
stack depot handles when the object is initialized and when it's evicted
from the quarantine.

Note that stack_depot_put is a no-op if the handle is 0.
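
Because of that, the eviction paths can put every handle unconditionally,
as long as the handle fields start out zeroed. A minimal sketch of the
release-and-reset pattern used by the krealloc and quarantine paths in this
patch (the helper name is hypothetical; the body mirrors the hunks below):

	/* Illustrative only: drop all depot references held by an object's alloc metadata. */
	static void release_alloc_meta_refs(struct kasan_alloc_meta *alloc_meta)
	{
		/* Each put is safe even for handles that were never set: put(0) does nothing. */
		stack_depot_put(alloc_meta->alloc_track.stack);
		stack_depot_put(alloc_meta->aux_stack[0]);
		stack_depot_put(alloc_meta->aux_stack[1]);
		/* Zero the metadata so a second eviction attempt has nothing left to put. */
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}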

Reviewed-by: Marco Elver <elver@xxxxxxxxxx>
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
---
mm/kasan/common.c | 3 ++-
mm/kasan/generic.c | 22 ++++++++++++++++++----
mm/kasan/quarantine.c | 26 ++++++++++++++++++++------
3 files changed, 40 insertions(+), 11 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 825a0240ec02..b5d8bd26fced 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -50,7 +50,8 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
track->pid = current->pid;
- track->stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
+ track->stack = kasan_save_stack(flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 5d168c9afb32..50cc519e23f4 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -449,10 +449,14 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
struct kasan_alloc_meta *alloc_meta;
+ struct kasan_free_meta *free_meta;

alloc_meta = kasan_get_alloc_meta(cache, object);
if (alloc_meta)
__memset(alloc_meta, 0, sizeof(*alloc_meta));
+ free_meta = kasan_get_free_meta(cache, object);
+ if (free_meta)
+ __memset(free_meta, 0, sizeof(*free_meta));
}

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
@@ -489,18 +493,20 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
if (!alloc_meta)
return;

+ stack_depot_put(alloc_meta->aux_stack[1]);
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}

void kasan_record_aux_stack(void *addr)
{
- return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
+ return __kasan_record_aux_stack(addr,
+ STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
- return __kasan_record_aux_stack(addr, 0);
+ return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -508,8 +514,16 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
struct kasan_alloc_meta *alloc_meta;

alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
- kasan_set_track(&alloc_meta->alloc_track, flags);
+ if (!alloc_meta)
+ return;
+
+ /* Evict previous stack traces (might exist for krealloc). */
+ stack_depot_put(alloc_meta->alloc_track.stack);
+ stack_depot_put(alloc_meta->aux_stack[0]);
+ stack_depot_put(alloc_meta->aux_stack[1]);
+ __memset(alloc_meta, 0, sizeof(*alloc_meta));
+
+ kasan_set_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index ca4529156735..265ca2bbe2dd 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -143,11 +143,22 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
void *object = qlink_to_object(qlink, cache);
- struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
+ struct kasan_alloc_meta *alloc_meta = kasan_get_alloc_meta(cache, object);
+ struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
unsigned long flags;

- if (IS_ENABLED(CONFIG_SLAB))
- local_irq_save(flags);
+ if (alloc_meta) {
+ stack_depot_put(alloc_meta->alloc_track.stack);
+ stack_depot_put(alloc_meta->aux_stack[0]);
+ stack_depot_put(alloc_meta->aux_stack[1]);
+ __memset(alloc_meta, 0, sizeof(*alloc_meta));
+ }
+
+ if (free_meta &&
+ *(u8 *)kasan_mem_to_shadow(object) == KASAN_SLAB_FREETRACK) {
+ stack_depot_put(free_meta->free_track.stack);
+ free_meta->free_track.stack = 0;
+ }

/*
* If init_on_free is enabled and KASAN's free metadata is stored in
@@ -157,14 +168,17 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
*/
if (slab_want_init_on_free(cache) &&
cache->kasan_info.free_meta_offset == 0)
- memzero_explicit(meta, sizeof(*meta));
+ memzero_explicit(free_meta, sizeof(*free_meta));

/*
- * As the object now gets freed from the quarantine, assume that its
- * free track is no longer valid.
+ * As the object now gets freed from the quarantine,
+ * take note that its free track no longer exists.
*/
*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;

+ if (IS_ENABLED(CONFIG_SLAB))
+ local_irq_save(flags);
+
___cache_free(cache, object, _THIS_IP_);

if (IS_ENABLED(CONFIG_SLAB))
--
2.25.1