[PATCH RFC v3 2/9] mm/slub: introduce __kmem_cache_free_bulk() without free hooks

From: Vlastimil Babka
Date: Wed Nov 29 2023 - 04:54:43 EST


Currently, when __kmem_cache_alloc_bulk() fails, it frees back the
objects that were allocated before the failure, using
kmem_cache_free_bulk(). Because kmem_cache_free_bulk() calls the free
hooks (KASAN etc.) and those expect objects that have been processed by
the post alloc hooks, slab_post_alloc_hook() has to be called before
kmem_cache_free_bulk().
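
For illustration, the bulk API is all-or-nothing from a caller's point
of view, which is why the partially allocated objects have to be cleaned
up internally. A minimal sketch of a hypothetical user (the names
"cachep" and "objs" are made up, not from this patch):

	void *objs[8];
	int n;

	/* returns the number of allocated objects, or 0 if the batch failed */
	n = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!n)
		return -ENOMEM;	/* nothing was allocated, nothing to free */

	/* ... use the objects ... */

	kmem_cache_free_bulk(cachep, n, objs);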

This is wasteful, although not a big concern in practice for the very
rare error path. But in order to efficiently handle percpu array batch
refill and free in the following patch, we will also need a variant of
kmem_cache_free_bulk() that avoids the free hooks. So introduce it first
and use it in the error path too.
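
As a sketch of the intended pairing (a hypothetical caller inside
mm/slub.c, not the actual percpu array code from the next patch):
objects obtained via __kmem_cache_alloc_bulk() have not been through
slab_post_alloc_hook() yet, so undoing such a batch has to bypass the
free hooks as well:

	void *batch[16];
	int allocated;

	/* raw objects: the post alloc hooks have not run on them yet */
	allocated = __kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(batch),
					    batch);

	if (need_to_undo) {	/* hypothetical condition */
		/*
		 * The objects were never exposed to the post alloc hooks,
		 * so return them with the hook-less variant instead of
		 * kmem_cache_free_bulk().
		 */
		__kmem_cache_free_bulk(s, allocated, batch);
	}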

As a consequence, __kmem_cache_alloc_bulk() no longer needs the objcg
parameter, so remove it.
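
The objcg handling stays at the kmem_cache_alloc_bulk() level, which
already runs the post alloc hooks only when the whole batch succeeded,
roughly as follows (condensed from the surrounding code in mm/slub.c,
not part of this diff):

	i = __kmem_cache_alloc_bulk(s, flags, size, p);

	/* the post alloc hooks, and thus objcg, are only needed on success */
	if (i != 0)
		slab_post_alloc_hook(s, objcg, flags, size, p,
				     slab_want_init_on_alloc(flags, s),
				     s->object_size);
	return i;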

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
mm/slub.c | 33 ++++++++++++++++++++++++++-------
1 file changed, 26 insertions(+), 7 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index f0cd55bb4e11..16748aeada8f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3919,6 +3919,27 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	return same;
 }

+/*
+ * Internal bulk free of objects that were not initialised by the post alloc
+ * hooks and thus should not be processed by the free hooks
+ */
+static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+{
+	if (!size)
+		return;
+
+	do {
+		struct detached_freelist df;
+
+		size = build_detached_freelist(s, size, p, &df);
+		if (!df.slab)
+			continue;
+
+		do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
+			     _RET_IP_);
+	} while (likely(size));
+}
+
/* Note that interrupts must be enabled when calling this function. */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
@@ -3940,7 +3961,7 @@ EXPORT_SYMBOL(kmem_cache_free_bulk);

#ifndef CONFIG_SLUB_TINY
static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
-			size_t size, void **p, struct obj_cgroup *objcg)
+			size_t size, void **p)
 {
 	struct kmem_cache_cpu *c;
 	unsigned long irqflags;
@@ -4004,14 +4025,13 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,

 error:
 	slub_put_cpu_ptr(s->cpu_slab);
-	slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
-	kmem_cache_free_bulk(s, i, p);
+	__kmem_cache_free_bulk(s, i, p);
 	return 0;

 }
#else /* CONFIG_SLUB_TINY */
static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
-			size_t size, void **p, struct obj_cgroup *objcg)
+			size_t size, void **p)
 {
 	int i;

@@ -4034,8 +4054,7 @@ static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 	return i;

 error:
-	slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
-	kmem_cache_free_bulk(s, i, p);
+	__kmem_cache_free_bulk(s, i, p);
 	return 0;
 }
#endif /* CONFIG_SLUB_TINY */
@@ -4055,7 +4074,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	if (unlikely(!s))
 		return 0;

-	i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
+	i = __kmem_cache_alloc_bulk(s, flags, size, p);

 	/*
 	 * memcg and kmem_cache debug support and memory initialization.

--
2.43.0