[PATCH] slab: move kmem_cache_free to common code

From: Glauber Costa
Date: Mon Oct 22 2012 - 07:20:26 EST


As part of the effort to unify the slab allocators, it would be better
to have a single entry point for kmem_cache_free. At the same time, the
allocators' different layouts lead to free routines that are
necessarily different.

Because we also want each allocator to free objects in its fast path
without issuing a call instruction, we rely on the pre-processor: every
allocator defines its own always-inline free routine, and a shared
macro, KMEM_CACHE_FREE, generates the exported kmem_cache_free (plus
the tracepoint) around it.
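
To illustrate the pattern outside the kernel, here is a minimal,
standalone sketch (the cache struct, the printf calls standing in for
trace_kmem_cache_free, and the allocator body are all stand-ins, not
the kernel's code):

#include <stdio.h>

struct kmem_cache { const char *name; };        /* stand-in */

/*
 * Shared wrapper generator: stamps out the single exported entry
 * point around whatever free routine the allocator passes in, so
 * the allocator's fast path is inlined with no call instruction.
 */
#define KMEM_CACHE_FREE(allocator_fn)                           \
void kmem_cache_free(struct kmem_cache *s, void *x)             \
{                                                               \
        allocator_fn(s, x);                                     \
        printf("trace: freed %p from %s\n", x, s->name);        \
}

/* One allocator's private fast path (stands in for slab/slub/slob). */
static inline __attribute__((always_inline))
void __kmem_cache_free(struct kmem_cache *s, void *x)
{
        printf("%s: releasing object %p\n", s->name, x);
}

KMEM_CACHE_FREE(__kmem_cache_free)

int main(void)
{
        struct kmem_cache cache = { .name = "demo-cache" };
        int obj;
        kmem_cache_free(&cache, &obj);
        return 0;
}

Each allocator invokes the macro once with its own inline routine, so
the common entry point and tracepoint live in one place while the hot
free path stays fully inlined per allocator.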

Signed-off-by: Glauber Costa <glommer@xxxxxxxxxxxxx>
CC: Christoph Lameter <cl@xxxxxxxxx>
CC: Pekka Enberg <penberg@xxxxxxxxxx>
CC: David Rientjes <rientjes@xxxxxxxxxx>
---
mm/slab.c | 15 +++------------
mm/slab.h | 26 ++++++++++++++++++++++++++
mm/slob.c | 6 ++----
mm/slub.c | 6 ++----
4 files changed, 33 insertions(+), 20 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 98b3460..bceffcc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3903,15 +3903,8 @@ void *__kmalloc(size_t size, gfp_t flags)
EXPORT_SYMBOL(__kmalloc);
#endif

-/**
- * kmem_cache_free - Deallocate an object
- * @cachep: The cache the allocation was from.
- * @objp: The previously allocated object.
- *
- * Free an object which was previously allocated from this
- * cache.
- */
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+static __always_inline void
+__kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;

@@ -3921,10 +3914,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
debug_check_no_obj_freed(objp, cachep->object_size);
__cache_free(cachep, objp, __builtin_return_address(0));
local_irq_restore(flags);
-
- trace_kmem_cache_free(_RET_IP_, objp);
}
-EXPORT_SYMBOL(kmem_cache_free);
+KMEM_CACHE_FREE(__kmem_cache_free);

/**
* kfree - free previously allocated memory
diff --git a/mm/slab.h b/mm/slab.h
index 66a62d3..32063f8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -92,4 +92,30 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos);
+
+/*
+ * What goes below for kmem_cache_free is not pretty. But because this
+ * is an extremely hot path, we would like to avoid function calls as
+ * much as we can.
+ *
+ * As ugly as it is, this is a way to guarantee that allocators with
+ * different layouts, and therefore different free functions, can still
+ * live in separate files and inline the whole of kmem_cache_free.
+ */
+/**
+ * kmem_cache_free - Deallocate an object
+ * @cachep: The cache the allocation was from.
+ * @objp: The previously allocated object.
+ *
+ * Free an object which was previously allocated from this
+ * cache.
+ *
+ */
+#define KMEM_CACHE_FREE(allocator_fn) \
+void kmem_cache_free(struct kmem_cache *s, void *x) \
+{ \
+ allocator_fn(s, x); \
+ trace_kmem_cache_free(_RET_IP_, x); \
+} \
+EXPORT_SYMBOL(kmem_cache_free)
#endif
diff --git a/mm/slob.c b/mm/slob.c
index 3edfeaa..1580371 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -571,7 +571,7 @@ static void kmem_rcu_free(struct rcu_head *head)
__kmem_cache_free(b, slob_rcu->size);
}

-void kmem_cache_free(struct kmem_cache *c, void *b)
+static __always_inline void do_kmem_cache_free(struct kmem_cache *c, void *b)
{
kmemleak_free_recursive(b, c->flags);
if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
@@ -582,10 +582,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
} else {
__kmem_cache_free(b, c->size);
}
-
- trace_kmem_cache_free(_RET_IP_, b);
}
-EXPORT_SYMBOL(kmem_cache_free);
+KMEM_CACHE_FREE(do_kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
diff --git a/mm/slub.c b/mm/slub.c
index 259bc2c..7dd41d7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2606,7 +2606,7 @@ redo:

}

-void kmem_cache_free(struct kmem_cache *s, void *x)
+static __always_inline void __kmem_cache_free(struct kmem_cache *s, void *x)
{
struct page *page;

@@ -2620,10 +2620,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
}

slab_free(s, page, x, _RET_IP_);
-
- trace_kmem_cache_free(_RET_IP_, x);
}
-EXPORT_SYMBOL(kmem_cache_free);
+KMEM_CACHE_FREE(__kmem_cache_free);

/*
* Object placement in a slab is made very easy because we always start at
--
1.7.11.7

