[PATCH v2 10/21] mm/slab: move struct kmem_cache_cpu declaration to slub.c

From: Vlastimil Babka
Date: Mon Nov 20 2023 - 13:35:14 EST


Nothing outside SLUB itself accesses the struct kmem_cache_cpu fields,
so the structure does not need to be declared in slub_def.h. This also
allows enum stat_item to be moved along with it.
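
For background, here is a condensed sketch (not part of this change) of
how the allocation fastpath in mm/slub.c consumes these fields. It is
heavily simplified: the hypothetical sketch_alloc_fastpath() stands in
for the real slab_alloc_node(), the retry and fallback paths are
collapsed into returning NULL, and get_freepointer()/next_tid() are
slub.c's internal helpers.

/*
 * Sketch only: illustrates why freelist and tid share a union with
 * freelist_aba_t. Both words must be swapped by a single atomic
 * double-word operation so that a concurrent freelist change (ABA)
 * is caught by the tid mismatch.
 */
static __always_inline void *sketch_alloc_fastpath(struct kmem_cache *s)
{
	struct kmem_cache_cpu *c = raw_cpu_ptr(s->cpu_slab);
	unsigned long tid = READ_ONCE(c->tid);
	void *object = READ_ONCE(c->freelist);

	if (unlikely(!object || !c->slab))
		return NULL;	/* real code falls back to __slab_alloc() */

	/* The layout must satisfy this_cpu_cmpxchg_double() alignment. */
	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
				     object, tid,
				     get_freepointer(s, object), next_tid(tid)))
		return NULL;	/* lost the race; real code retries */

	stat(s, ALLOC_FASTPATH);
	return object;
}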

Reviewed-by: Kees Cook <keescook@xxxxxxxxxxxx>
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 include/linux/slub_def.h | 54 ------------------------------------------------
 mm/slub.c                | 54 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+), 54 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index deb90cf4bffb..a0229ea42977 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -12,60 +12,6 @@
 #include <linux/reciprocal_div.h>
 #include <linux/local_lock.h>
 
-enum stat_item {
-	ALLOC_FASTPATH,		/* Allocation from cpu slab */
-	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
-	FREE_FASTPATH,		/* Free to cpu slab */
-	FREE_SLOWPATH,		/* Freeing not to cpu slab */
-	FREE_FROZEN,		/* Freeing to frozen slab */
-	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
-	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
-	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
-	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
-	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
-	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
-	FREE_SLAB,		/* Slab freed to the page allocator */
-	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
-	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
-	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
-	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
-	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
-	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
-	DEACTIVATE_BYPASS,	/* Implicit deactivation */
-	ORDER_FALLBACK,		/* Number of times fallback was necessary */
-	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
-	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
-	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
-	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
-	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
-	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
-	NR_SLUB_STAT_ITEMS
-};
-
-#ifndef CONFIG_SLUB_TINY
-/*
- * When changing the layout, make sure freelist and tid are still compatible
- * with this_cpu_cmpxchg_double() alignment requirements.
- */
-struct kmem_cache_cpu {
-	union {
-		struct {
-			void **freelist;	/* Pointer to next available object */
-			unsigned long tid;	/* Globally unique transaction id */
-		};
-		freelist_aba_t freelist_tid;
-	};
-	struct slab *slab;	/* The slab from which we are allocating */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-	struct slab *partial;	/* Partially allocated frozen slabs */
-#endif
-	local_lock_t lock;	/* Protects the fields above */
-#ifdef CONFIG_SLUB_STATS
-	unsigned stat[NR_SLUB_STAT_ITEMS];
-#endif
-};
-#endif /* CONFIG_SLUB_TINY */
-
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 #define slub_percpu_partial(c)		((c)->partial)
 
diff --git a/mm/slub.c b/mm/slub.c
index 3e01731783df..979932d046fd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -330,6 +330,60 @@ static void debugfs_slab_add(struct kmem_cache *);
 static inline void debugfs_slab_add(struct kmem_cache *s) { }
 #endif
 
+enum stat_item {
+	ALLOC_FASTPATH,		/* Allocation from cpu slab */
+	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
+	FREE_FASTPATH,		/* Free to cpu slab */
+	FREE_SLOWPATH,		/* Freeing not to cpu slab */
+	FREE_FROZEN,		/* Freeing to frozen slab */
+	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
+	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
+	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
+	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
+	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
+	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
+	FREE_SLAB,		/* Slab freed to the page allocator */
+	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
+	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
+	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
+	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
+	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
+	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	DEACTIVATE_BYPASS,	/* Implicit deactivation */
+	ORDER_FALLBACK,		/* Number of times fallback was necessary */
+	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
+	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
+	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
+	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
+	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
+	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
+	NR_SLUB_STAT_ITEMS
+};
+
+#ifndef CONFIG_SLUB_TINY
+/*
+ * When changing the layout, make sure freelist and tid are still compatible
+ * with this_cpu_cmpxchg_double() alignment requirements.
+ */
+struct kmem_cache_cpu {
+	union {
+		struct {
+			void **freelist;	/* Pointer to next available object */
+			unsigned long tid;	/* Globally unique transaction id */
+		};
+		freelist_aba_t freelist_tid;
+	};
+	struct slab *slab;	/* The slab from which we are allocating */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+	struct slab *partial;	/* Partially allocated frozen slabs */
+#endif
+	local_lock_t lock;	/* Protects the fields above */
+#ifdef CONFIG_SLUB_STATS
+	unsigned int stat[NR_SLUB_STAT_ITEMS];
+#endif
+};
+#endif /* CONFIG_SLUB_TINY */
+
 static inline void stat(const struct kmem_cache *s, enum stat_item si)
 {
 #ifdef CONFIG_SLUB_STATS
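
For reference, and not part of the diff: the stat() helper whose opening
lines appear as trailing context above continues in mm/slub.c roughly as
sketched below; treat the tree as authoritative.

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The read-modify-write is racy on a preemptible kernel, but
	 * these counters are only statistics, so a raw (preemption-
	 * unsafe) per-cpu increment is acceptable and cheap.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}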

--
2.42.1