[PATCH RFC 1/3] slub: keep full slabs on list for per memcg caches

From: Vladimir Davydov
Date: Tue May 13 2014 - 09:49:04 EST


Currently, full slabs are kept on the per-node full lists only when
debugging (SLAB_STORE_USER) is enabled. We will need this tracking to
reparent per-memcg caches, so let's enable it for them as well.

Signed-off-by: Vladimir Davydov <vdavydov@xxxxxxxxxxxxx>
---
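Note to reviewers: the core of the change is the new
kmem_cache_tracks_full() predicate, which makes per-memcg (non-root)
caches link their full slabs on the per-node full list unconditionally,
while root caches keep doing so only under SLAB_STORE_USER. A
stand-alone user-space sketch of that decision follows; the struct and
field names are invented for illustration and are not the kernel types:

/*
 * Minimal model of the predicate added by this patch; cache_model and
 * its fields are stand-ins, not the kernel's struct kmem_cache.
 */
#include <stdbool.h>
#include <stdio.h>

struct cache_model {
	bool is_root;		/* root cache vs. per-memcg child cache */
	bool store_user;	/* stands in for SLAB_STORE_USER debugging */
};

/* mirrors kmem_cache_tracks_full(): !is_root_cache(s) || kmem_cache_debug(s) */
static bool tracks_full(const struct cache_model *c)
{
	return !c->is_root || c->store_user;
}

int main(void)
{
	struct cache_model memcg_cache = { .is_root = false, .store_user = false };
	struct cache_model root_cache = { .is_root = true, .store_user = false };

	printf("per-memcg cache tracks full slabs: %d\n", tracks_full(&memcg_cache));
	printf("root cache w/o debug tracks full slabs: %d\n", tracks_full(&root_cache));
	return 0;
}
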
mm/slab.h | 2 ++
mm/slub.c | 91 +++++++++++++++++++++++++++++++++++++++++--------------------
2 files changed, 63 insertions(+), 30 deletions(-)
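
The subtle part is the get_freelist() and __slab_free() hunks: when an
update will leave a slab fully allocated and unfrozen, the node's
list_lock now has to be taken before the cmpxchg and dropped again if
the cmpxchg fails, so the page can be moved on or off the full list
under the lock. The following stand-alone model shows the retry
pattern; a pthread mutex and a C11 atomic stand in for list_lock and
__cmpxchg_double_slab(), and all names are invented for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int slab_counters;	/* stands in for page->counters */

/*
 * Try to switch slab_counters from @expected to @desired.  If the
 * transition leaves the slab "full and unfrozen", take node_lock before
 * the cmpxchg; on cmpxchg failure drop the lock and retry, mirroring
 * the unlock at the top of the loop in the get_freelist() hunk.
 */
static bool update_counters(int expected, int desired, bool will_be_full)
{
	bool locked = false;

	for (;;) {
		if (locked) {
			/* previous attempt failed: drop the lock and retry */
			pthread_mutex_unlock(&node_lock);
			locked = false;
		}

		int old = atomic_load(&slab_counters);
		if (old != expected)
			return false;	/* someone else changed the slab */

		if (will_be_full) {
			/* lock *before* the cmpxchg, as the patch does */
			pthread_mutex_lock(&node_lock);
			locked = true;
		}

		if (atomic_compare_exchange_strong(&slab_counters, &old, desired))
			break;
	}

	if (locked) {
		/* this is where add_full(s, n, page) runs in the patch */
		pthread_mutex_unlock(&node_lock);
	}
	return true;
}

int main(void)
{
	atomic_store(&slab_counters, 1);
	printf("transition done: %d\n", update_counters(1, 2, true));
	return 0;
}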

diff --git a/mm/slab.h b/mm/slab.h
index 961a3fb1f5a2..0eca922ed7a0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -288,6 +288,8 @@ struct kmem_cache_node {
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
atomic_long_t total_objects;
+#endif
+#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_SLUB_DEBUG)
struct list_head full;
#endif
#endif
diff --git a/mm/slub.c b/mm/slub.c
index 4d5002f518b1..6019c315a2f9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -132,6 +132,11 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
#endif
}

+static inline bool kmem_cache_tracks_full(struct kmem_cache *s)
+{
+ return !is_root_cache(s) || kmem_cache_debug(s);
+}
+
/*
* Issues still to be resolved:
*
@@ -998,28 +1003,6 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
debug_check_no_obj_freed(x, s->object_size);
}

-/*
- * Tracking of fully allocated slabs for debugging purposes.
- */
-static void add_full(struct kmem_cache *s,
- struct kmem_cache_node *n, struct page *page)
-{
- if (!(s->flags & SLAB_STORE_USER))
- return;
-
- lockdep_assert_held(&n->list_lock);
- list_add(&page->lru, &n->full);
-}
-
-static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
-{
- if (!(s->flags & SLAB_STORE_USER))
- return;
-
- lockdep_assert_held(&n->list_lock);
- list_del(&page->lru);
-}
-
/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
@@ -1259,10 +1242,6 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, u8 val) { return 1; }
-static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
- struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
- struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long object_size,
unsigned long flags, const char *name,
void (*ctor)(void *))
@@ -1557,6 +1536,33 @@ static inline void remove_partial(struct kmem_cache_node *n,
__remove_partial(n, page);
}

+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_MEMCG_KMEM)
+static inline void add_full(struct kmem_cache *s,
+ struct kmem_cache_node *n, struct page *page)
+{
+ if (is_root_cache(s) && !(s->flags & SLAB_STORE_USER))
+ return;
+
+ lockdep_assert_held(&n->list_lock);
+ list_add(&page->lru, &n->full);
+}
+
+static inline void remove_full(struct kmem_cache *s,
+ struct kmem_cache_node *n, struct page *page)
+{
+ if (is_root_cache(s) && !(s->flags & SLAB_STORE_USER))
+ return;
+
+ lockdep_assert_held(&n->list_lock);
+ list_del(&page->lru);
+}
+#else
+static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
+ struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+ struct page *page) {}
+#endif
+
/*
* Remove slab from the partial list, freeze it and
* return the pointer to the freelist.
@@ -1896,7 +1902,7 @@ redo:
}
} else {
m = M_FULL;
- if (kmem_cache_debug(s) && !lock) {
+ if (kmem_cache_tracks_full(s) && !lock) {
lock = 1;
/*
* This also ensures that the scanning of full
@@ -2257,8 +2263,14 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
struct page new;
unsigned long counters;
void *freelist;
+ struct kmem_cache_node *n = NULL;

do {
+ if (unlikely(n)) {
+ spin_unlock(&n->list_lock);
+ n = NULL;
+ }
+
freelist = page->freelist;
counters = page->counters;

@@ -2268,11 +2280,21 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
new.inuse = page->objects;
new.frozen = freelist != NULL;

+ if (kmem_cache_tracks_full(s) && !new.frozen) {
+ n = get_node(s, page_to_nid(page));
+ spin_lock(&n->list_lock);
+ }
+
} while (!__cmpxchg_double_slab(s, page,
freelist, counters,
NULL, new.counters,
"get_freelist"));

+ if (n) {
+ add_full(s, n, page);
+ spin_unlock(&n->list_lock);
+ }
+
return freelist;
}

@@ -2575,7 +2597,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
new.inuse--;
if ((!new.inuse || !prior) && !was_frozen) {

- if (kmem_cache_has_cpu_partial(s) && !prior) {
+ if (kmem_cache_has_cpu_partial(s) &&
+ !kmem_cache_tracks_full(s) && !prior) {

/*
* Slab was on no list before and will be
@@ -2587,6 +2610,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,

} else { /* Needs to be taken off a list */

+ if (kmem_cache_has_cpu_partial(s) && !prior)
+ new.frozen = 1;
+
n = get_node(s, page_to_nid(page));
/*
* Speculatively acquire the list_lock.
@@ -2606,6 +2632,12 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
object, new.counters,
"__slab_free"));

+ if (unlikely(n) && new.frozen && !was_frozen) {
+ remove_full(s, n, page);
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ n = NULL;
+ }
+
if (likely(!n)) {

/*
@@ -2633,8 +2665,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* then add it.
*/
if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
- if (kmem_cache_debug(s))
- remove_full(s, n, page);
+ remove_full(s, n, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
--
1.7.10.4
