[PATCH 5.9 58/75] mm: memcg/slab: fix obj_cgroup_charge() return value handling

From: Greg Kroah-Hartman
Date: Thu Dec 10 2020 - 10:08:44 EST


From: Roman Gushchin <guro@xxxxxx>

commit becaba65f62f88e553ec92ed98370e9d2b18e629 upstream.

Commit 10befea91b61 ("mm: memcg/slab: use a single set of kmem_caches
for all allocations") introduced a regression into the handling of the
obj_cgroup_charge() return value. If a non-zero value is returned
(indicating that one of the memory.max limits has been exceeded), the
allocation should fail instead of falling back to non-accounted mode.
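
As a purely illustrative sketch (a userspace toy model, not the kernel
code; obj_cgroup_charge() and the limit below are stubbed stand-ins for
the real memcg APIs), the intended behaviour is:

/* Toy model: a failed charge must fail the whole allocation,
 * never silently fall back to an unaccounted allocation. */
#include <stdio.h>
#include <stdlib.h>

struct obj_cgroup { long charged; long limit; };

/* Stand-in for obj_cgroup_charge(): 0 on success, non-zero on failure. */
static int obj_cgroup_charge(struct obj_cgroup *objcg, size_t size)
{
	if (objcg->charged + (long)size > objcg->limit)
		return -1;		/* memory.max exceeded */
	objcg->charged += size;
	return 0;
}

/* Returns NULL when the charge fails: the caller must fail the allocation. */
static void *accounted_alloc(struct obj_cgroup *objcg, size_t size)
{
	if (obj_cgroup_charge(objcg, size))
		return NULL;		/* no fallback to unaccounted mode */
	return malloc(size);
}

int main(void)
{
	struct obj_cgroup cg = { .charged = 0, .limit = 4096 };
	void *a = accounted_alloc(&cg, 1024);	/* within the limit */
	void *b = accounted_alloc(&cg, 8192);	/* over the limit -> NULL */

	printf("a=%p b=%p\n", a, b);
	free(a);
	return 0;
}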

To make the code more readable, move the calling conditions for
memcg_slab_pre_alloc_hook() and memcg_slab_post_alloc_hook() into the
bodies of these hooks.
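
The resulting call-site contract can be sketched as follows (again a
hypothetical userspace model, with the kernel predicates such as
memcg_kmem_enabled() and the __GFP_ACCOUNT/SLAB_ACCOUNT checks replaced
by plain booleans):

/* Toy model: the hook checks its own preconditions, so the caller
 * only looks at the return value; false means "fail the allocation". */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool kmem_enabled = true;	/* models memcg_kmem_enabled() */
static const size_t limit = 4096;	/* models a memory.max limit */

/* Models memcg_slab_pre_alloc_hook(): true = proceed, false = fail. */
static bool pre_alloc_hook(bool account, size_t size, bool *charged)
{
	*charged = false;

	if (!kmem_enabled)
		return true;	/* accounting disabled: proceed unaccounted */
	if (!account)
		return true;	/* neither __GFP_ACCOUNT nor SLAB_ACCOUNT */
	if (size > limit)
		return false;	/* charge failed: allocation must fail */

	*charged = true;
	return true;
}

int main(void)
{
	bool charged;

	/* The caller no longer repeats the conditions around the call. */
	if (!pre_alloc_hook(true, 8192, &charged))
		printf("allocation fails: charge rejected\n");
	if (pre_alloc_hook(true, 512, &charged))
		printf("allocation proceeds, charged=%d\n", charged);
	return 0;
}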

Fixes: 10befea91b61 ("mm: memcg/slab: use a single set of kmem_caches for all allocations")
Signed-off-by: Roman Gushchin <guro@xxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Reviewed-by: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx>
Link: https://lkml.kernel.org/r/20201127161828.GD840171@xxxxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
mm/slab.h | 42 +++++++++++++++++++++++++-----------------
1 file changed, 25 insertions(+), 17 deletions(-)

--- a/mm/slab.h
+++ b/mm/slab.h
@@ -275,25 +275,35 @@ static inline size_t obj_full_size(struc
 	return s->size + sizeof(struct obj_cgroup *);
 }
 
-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-						size_t objects,
-						gfp_t flags)
+/*
+ * Returns false if the allocation should fail.
+ */
+static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct obj_cgroup **objcgp,
+					     size_t objects, gfp_t flags)
 {
 	struct obj_cgroup *objcg;
 
+	if (!memcg_kmem_enabled())
+		return true;
+
+	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
+		return true;
+
 	if (memcg_kmem_bypass())
-		return NULL;
+		return true;
 
 	objcg = get_obj_cgroup_from_current();
 	if (!objcg)
-		return NULL;
+		return true;
 
 	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
 		obj_cgroup_put(objcg);
-		return NULL;
+		return false;
 	}
 
-	return objcg;
+	*objcgp = objcg;
+	return true;
 }
 
 static inline void mod_objcg_state(struct obj_cgroup *objcg,
@@ -319,7 +329,7 @@ static inline void memcg_slab_post_alloc
 	unsigned long off;
 	size_t i;
 
-	if (!objcg)
+	if (!memcg_kmem_enabled() || !objcg)
 		return;
 
 	flags &= ~__GFP_ACCOUNT;
@@ -404,11 +414,11 @@ static inline void memcg_free_page_obj_c
 {
 }
 
-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-						size_t objects,
-						gfp_t flags)
+static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct obj_cgroup **objcgp,
+					     size_t objects, gfp_t flags)
 {
-	return NULL;
+	return true;
 }
 
 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
@@ -512,9 +522,8 @@ static inline struct kmem_cache *slab_pr
 	if (should_failslab(s, flags))
 		return NULL;
 
-	if (memcg_kmem_enabled() &&
-	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
-		*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);
+	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
+		return NULL;
 
 	return s;
 }
@@ -533,8 +542,7 @@ static inline void slab_post_alloc_hook(
 					 s->flags, flags);
 	}
 
-	if (memcg_kmem_enabled())
-		memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
+	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
 }
 
 #ifndef CONFIG_SLOB