[PATCH] slab: setup cpu caches later on when interrupts are enabled

From: Pekka J Enberg
Date: Fri Jun 12 2009 - 09:11:24 EST


From: Pekka Enberg <penberg@xxxxxxxxxxxxxx>

enable_cpucache() ends up in do_tune_cpucache(), which uses on_each_cpu() and
therefore smp_call_function_many(), and that must not be called while
interrupts are disabled. With the early-boot slab changes, kmem_cache_init()
now runs before interrupts are enabled, so resizing the head arrays there
trips the interrupts-disabled check in kernel/smp.c. Move the head-array
resizing into kmem_cache_init_late(), which runs after interrupts have been
enabled, and add an EARLY state to g_cpucache_up so that slab_is_available()
already returns true between the two stages.

Fixes the following boot-time warning:

[ 0.000000] ------------[ cut here ]------------
[ 0.000000] WARNING: at kernel/smp.c:369 smp_call_function_many+0x56/0x1bc()
[ 0.000000] Hardware name:
[ 0.000000] Modules linked in:
[ 0.000000] Pid: 0, comm: swapper Not tainted 2.6.30 #492
[ 0.000000] Call Trace:
[ 0.000000] [<ffffffff8149e021>] ? _spin_unlock+0x4f/0x5c
[ 0.000000] [<ffffffff8108f11b>] ? smp_call_function_many+0x56/0x1bc
[ 0.000000] [<ffffffff81061764>] warn_slowpath_common+0x7c/0xa9
[ 0.000000] [<ffffffff810617a5>] warn_slowpath_null+0x14/0x16
[ 0.000000] [<ffffffff8108f11b>] smp_call_function_many+0x56/0x1bc
[ 0.000000] [<ffffffff810f3e00>] ? do_ccupdate_local+0x0/0x54
[ 0.000000] [<ffffffff810f3e00>] ? do_ccupdate_local+0x0/0x54
[ 0.000000] [<ffffffff8108f2be>] smp_call_function+0x3d/0x68
[ 0.000000] [<ffffffff810f3e00>] ? do_ccupdate_local+0x0/0x54
[ 0.000000] [<ffffffff81066fd8>] on_each_cpu+0x31/0x7c
[ 0.000000] [<ffffffff810f64f5>] do_tune_cpucache+0x119/0x454
[ 0.000000] [<ffffffff81087080>] ? lockdep_init_map+0x94/0x10b
[ 0.000000] [<ffffffff818133b0>] ? kmem_cache_init+0x421/0x593
[ 0.000000] [<ffffffff810f69cf>] enable_cpucache+0x68/0xad
[ 0.000000] [<ffffffff818133c3>] kmem_cache_init+0x434/0x593
[ 0.000000] [<ffffffff8180987c>] ? mem_init+0x156/0x161
[ 0.000000] [<ffffffff817f8aae>] start_kernel+0x1cc/0x3b9
[ 0.000000] [<ffffffff817f829a>] x86_64_start_reservations+0xaa/0xae
[ 0.000000] [<ffffffff817f837f>] x86_64_start_kernel+0xe1/0xe8
[ 0.000000] ---[ end trace 4eaa2a86a8e2da22 ]---

Cc: Christoph Lameter <cl@xxxxxxxxxxxxxxxxxxxx>
Cc: Nick Piggin <npiggin@xxxxxxx>
Signed-off-by: Pekka Enberg <penberg@xxxxxxxxxxxxxx>
---
Note: this patch depends on the "[PATCH] slab: don't enable interrupts
during early boot" one that I posted earlier in the "Re: [PATCH v2]
slab,slub: ignore __GFP_WAIT if we're booting or suspending" thread.
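
For reviewers, here is a minimal userspace sketch (not kernel code) of the
two-stage init this patch produces. The enum, the function names, and the
slab_is_available() change mirror the hunks below; the irqs_enabled flag and
the printf output are purely illustrative stand-ins for the real interrupt
state and the real cache work:

/* Minimal userspace model of the two-stage slab init after this patch. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	EARLY,	/* new: kmalloc() works, head arrays not yet resized */
	FULL
} g_cpucache_up;

static bool irqs_enabled;	/* stand-in for the real interrupt state */

static int slab_is_available(void)
{
	return g_cpucache_up >= EARLY;	/* was: == FULL */
}

static void kmem_cache_init(void)
{
	assert(!irqs_enabled);	/* runs early, interrupts still off */
	/* bootstrap caches would be set up here */
	g_cpucache_up = EARLY;
}

static void kmem_cache_init_late(void)
{
	assert(irqs_enabled);	/* on_each_cpu() is safe only now */
	/* head arrays would be resized via do_tune_cpucache() here */
	g_cpucache_up = FULL;
}

int main(void)
{
	kmem_cache_init();
	printf("after kmem_cache_init:      slab_is_available() = %d\n",
	       slab_is_available());

	irqs_enabled = true;	/* start_kernel() enables interrupts */

	kmem_cache_init_late();
	printf("after kmem_cache_init_late: slab_is_available() = %d\n",
	       slab_is_available());
	return 0;
}

Both printfs report 1: once EARLY is reached, early kmalloc() users that
check slab_is_available() keep working across the gap between the two stages.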

mm/slab.c | 37 +++++++++++++++++++------------------
1 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index c8e217c..84368b8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -759,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;

@@ -767,7 +768,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1631,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
 
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
@@ -1660,14 +1669,6 @@ void __init kmem_cache_init(void)
 	 */
 }
 
-void __init kmem_cache_init_late(void)
-{
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-}
-
 static int __init cpucache_init(void)
 {
 	int cpu;
--
1.6.0.4
