Re: [PATCH 4/4] mm/slab: move slab merge from slab_common.c to slub.c

From: Hyeonggon Yoo
Date: Tue Nov 21 2023 - 03:54:53 EST


On Mon, Nov 20, 2023 at 6:13 PM <sxwjean@xxxxxx> wrote:
>
> From: Xiongwei Song <xiongwei.song@xxxxxxxxxxxxx>
>
> Since the SLAB allocator has been removed, there are no users of the
> slab merge code other than SLUB. This commit mostly reverts
> commit 423c929cbbec ("mm/slab_common: commonize slab merge logic").
>
> Also change the prefix of all slab-merge-related functions, variables
> and definitions from "slab/SLAB" to "slub/SLUB".

Could you please elaborate a little bit?
I am not sure I understand what the last two patches of this series
are meant to achieve.

- Why rename the variables/functions/macros?
- Why move the merge-related functions from slab_common.c to slub.c?
(I mean, merging slab_common.c and slub.c into a single file might make
sense, but why move only some parts of one into the other?)

> Signed-off-by: Xiongwei Song <xiongwei.song@xxxxxxxxxxxxx>
> ---
> mm/slab.h | 3 --
> mm/slab_common.c | 98 ----------------------------------------------
> mm/slub.c | 100 ++++++++++++++++++++++++++++++++++++++++++++++-
> 3 files changed, 99 insertions(+), 102 deletions(-)
>
> diff --git a/mm/slab.h b/mm/slab.h
> index 8d20f8c6269d..cd52e705ce28 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -429,9 +429,6 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
>
> unsigned int calculate_alignment(slab_flags_t flags,
> unsigned int align, unsigned int size);
> -int slab_unmergeable(struct kmem_cache *s);
> -struct kmem_cache *find_mergeable(unsigned size, unsigned align,
> - slab_flags_t flags, const char *name, void (*ctor)(void *));
> struct kmem_cache *
> __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
> slab_flags_t flags, void (*ctor)(void *));
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 62eb77fdedf2..6960ae5c35ee 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -45,36 +45,6 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
> static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
> slab_caches_to_rcu_destroy_workfn);
>
> -/*
> - * Set of flags that will prevent slab merging
> - */
> -#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
> - SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
> - SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())
> -
> -#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
> - SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
> -
> -/*
> - * Merge control. If this is set then no merging of slab caches will occur.
> - */
> -static bool slub_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
> -
> -static int __init setup_slab_nomerge(char *str)
> -{
> - slub_nomerge = true;
> - return 1;
> -}
> -
> -static int __init setup_slab_merge(char *str)
> -{
> - slub_nomerge = false;
> - return 1;
> -}
> -
> -__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
> -__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
> -
> /*
> * Determine the size of a slab object
> */
> @@ -130,74 +100,6 @@ unsigned int calculate_alignment(slab_flags_t flags,
> return ALIGN(align, sizeof(void *));
> }
>
> -/*
> - * Find a mergeable slab cache
> - */
> -int slab_unmergeable(struct kmem_cache *s)
> -{
> - if (slub_nomerge || (s->flags & SLAB_NEVER_MERGE))
> - return 1;
> -
> - if (s->ctor)
> - return 1;
> -
> -#ifdef CONFIG_HARDENED_USERCOPY
> - if (s->usersize)
> - return 1;
> -#endif
> -
> - /*
> - * We may have set a slab to be unmergeable during bootstrap.
> - */
> - if (s->refcount < 0)
> - return 1;
> -
> - return 0;
> -}
> -
> -struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
> - slab_flags_t flags, const char *name, void (*ctor)(void *))
> -{
> - struct kmem_cache *s;
> -
> - if (slub_nomerge)
> - return NULL;
> -
> - if (ctor)
> - return NULL;
> -
> - size = ALIGN(size, sizeof(void *));
> - align = calculate_alignment(flags, align, size);
> - size = ALIGN(size, align);
> - flags = kmem_cache_flags(size, flags, name);
> -
> - if (flags & SLAB_NEVER_MERGE)
> - return NULL;
> -
> - list_for_each_entry_reverse(s, &slab_caches, list) {
> - if (slab_unmergeable(s))
> - continue;
> -
> - if (size > s->size)
> - continue;
> -
> - if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
> - continue;
> - /*
> - * Check if alignment is compatible.
> - * Courtesy of Adrian Drzewiecki
> - */
> - if ((s->size & ~(align - 1)) != s->size)
> - continue;
> -
> - if (s->size - size >= sizeof(void *))
> - continue;
> -
> - return s;
> - }
> - return NULL;
> -}
> -
> static struct kmem_cache *create_cache(const char *name,
> unsigned int object_size, unsigned int align,
> slab_flags_t flags, unsigned int useroffset,
> diff --git a/mm/slub.c b/mm/slub.c
> index ae1e6e635253..435d9ed140e4 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -709,6 +709,104 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
> return false;
> }
>
> +/*
> + * Set of flags that will prevent slab merging
> + */
> +#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
> + SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
> + SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())
> +
> +#define SLUB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
> + SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
> +
> +/*
> + * Merge control. If this is set then no merging of slab caches will occur.
> + */
> +static bool slub_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
> +
> +static int __init setup_slub_nomerge(char *str)
> +{
> + slub_nomerge = true;
> + return 1;
> +}
> +
> +static int __init setup_slub_merge(char *str)
> +{
> + slub_nomerge = false;
> + return 1;
> +}
> +
> +__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
> +__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
> +
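
(A side note on the merge control above: whether two caches actually
got merged is visible from userspace, because SLUB exposes a merged
cache name as a symlink under /sys/kernel/slab pointing at the
canonical cache. A minimal sketch, not part of this patch, with the
cache name taken from the command line:)

/* Prints whether the named cache is a merged alias (a symlink in
 * /sys/kernel/slab) or an unmerged cache (its own directory). */
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[256], target[256];
	ssize_t n;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <cache-name>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/sys/kernel/slab/%s", argv[1]);
	n = readlink(path, target, sizeof(target) - 1);
	if (n < 0) {
		printf("%s: not an alias (unmerged, or no such cache)\n",
		       argv[1]);
		return 0;
	}
	target[n] = '\0';
	printf("%s: merged, alias of %s\n", argv[1], target);
	return 0;
}
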
> +/*
> + * Find a mergeable slab cache
> + */
> +static inline int slub_unmergeable(struct kmem_cache *s)
> +{
> + if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
> + return 1;
> +
> + if (s->ctor)
> + return 1;
> +
> +#ifdef CONFIG_HARDENED_USERCOPY
> + if (s->usersize)
> + return 1;
> +#endif
> +
> + /*
> + * We may have set a slab to be unmergeable during bootstrap.
> + */
> + if (s->refcount < 0)
> + return 1;
> +
> + return 0;
> +}
> +
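
(For context: the practical effect of these checks shows up in the
return value of kmem_cache_create(). When an existing compatible cache
is found, it is returned instead of a new one. Below is a rough,
untested module sketch, not part of this patch; it assumes merging is
enabled (CONFIG_SLAB_MERGE_DEFAULT=y, no slub_nomerge on the kernel
command line), and the cache names are made up:)

/* Module sketch: with merging enabled, two ctor-less caches with the
 * same geometry and flags may alias each other, while a constructor
 * makes a cache unmergeable. */
#include <linux/module.h>
#include <linux/slab.h>

static void demo_ctor(void *obj) { }

static int __init merge_demo_init(void)
{
	struct kmem_cache *a, *b, *c;

	a = kmem_cache_create("merge_demo_a", 64, 0, 0, NULL);
	b = kmem_cache_create("merge_demo_b", 64, 0, 0, NULL);
	/* A non-NULL ctor makes slub_unmergeable() return 1. */
	c = kmem_cache_create("merge_demo_c", 64, 0, 0, demo_ctor);

	/* With merging on, expect a == b (aliased) and a != c. */
	pr_info("a == b: %d, a == c: %d\n", a == b, a == c);

	/* kmem_cache_destroy() copes with NULL and merged refcounts. */
	kmem_cache_destroy(c);
	kmem_cache_destroy(b);
	kmem_cache_destroy(a);
	return 0;
}

static void __exit merge_demo_exit(void) { }

module_init(merge_demo_init);
module_exit(merge_demo_exit);
MODULE_LICENSE("GPL");
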
> +static struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
> + slab_flags_t flags, const char *name, void (*ctor)(void *))
> +{
> + struct kmem_cache *s;
> +
> + if (slub_nomerge)
> + return NULL;
> +
> + if (ctor)
> + return NULL;
> +
> + size = ALIGN(size, sizeof(void *));
> + align = calculate_alignment(flags, align, size);
> + size = ALIGN(size, align);
> + flags = kmem_cache_flags(size, flags, name);
> +
> + if (flags & SLUB_NEVER_MERGE)
> + return NULL;
> +
> + list_for_each_entry_reverse(s, &slab_caches, list) {
> + if (slub_unmergeable(s))
> + continue;
> +
> + if (size > s->size)
> + continue;
> +
> + if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
> + continue;
> + /*
> + * Check if alignment is compatible.
> + * Courtesy of Adrian Drzewiecki
> + */
> + if ((s->size & ~(align - 1)) != s->size)
> + continue;
> +
> + if (s->size - size >= sizeof(void *))
> + continue;
> +
> + return s;
> + }
> + return NULL;
> +}
> +
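
(And the three per-candidate checks in the loop above can be modeled
in isolation. A toy userspace sketch, not kernel code; the struct and
sizes are invented, flags are ignored, and it assumes a 64-bit build
where sizeof(void *) == 8. As in the caller, the requested size is
taken as already rounded up by ALIGN():)

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct kmem_cache, for illustration only. */
struct toy_cache { const char *name; unsigned int size; };

static bool toy_mergeable(const struct toy_cache *s,
			  unsigned int size, unsigned int align)
{
	if (size > s->size)			/* request must fit */
		return false;
	if ((s->size & ~(align - 1)) != s->size) /* size honors align */
		return false;
	if (s->size - size >= sizeof(void *))	/* too much slack */
		return false;
	return true;
}

int main(void)
{
	struct toy_cache c = { "existing-64", 64 };

	/* 64-byte request, 8-byte align: exact fit -> mergeable */
	printf("64/8: %d\n", toy_mergeable(&c, 64, 8));	/* 1 */
	/* 56-byte request: 64 - 56 = 8 >= sizeof(void *) -> skipped */
	printf("56/8: %d\n", toy_mergeable(&c, 56, 8));	/* 0 */
	return 0;
}
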
> #ifdef CONFIG_SLUB_DEBUG
> static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
> static DEFINE_SPINLOCK(object_map_lock);
> @@ -6679,7 +6777,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
> int err;
> const char *name;
> struct kset *kset = cache_kset(s);
> - int unmergeable = slab_unmergeable(s);
> + int unmergeable = slub_unmergeable(s);
>
> if (!unmergeable && disable_higher_order_debug &&
> (slub_debug & DEBUG_METADATA_FLAGS))
> --
> 2.34.1
>