Re: [PATCH] paravirt/locks: avoid modifying static key before jump_label_init()

From: Dou Liyang
Date: Wed Oct 25 2017 - 00:26:19 EST


Hi Juergen,

At 10/23/2017 09:49 PM, Juergen Gross wrote:
Don't try to set the static virt_spin_lock_key to a value before
jump_label_init() has been called, as this will result in a WARN().

Solve the problem by introducing a new lock_init() hook called after
jump_label_init(), instead of doing the call from
smp_prepare_boot_cpu().

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
Based on kernel/git/tip/tip.git locking/core

I also hit that WARN() in the tip tree.
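
For reference, the WARN comes from the static key sanity check, which
fires because native_pv_lock_init() writes a static key before
jump_label_init() has run. A rough sketch of the two pieces involved
(abridged; details may differ by tree):

/* arch/x86/kernel/paravirt.c (abridged): called from
 * native_smp_prepare_boot_cpu(), i.e. before jump_label_init(). */
void __init native_pv_lock_init(void)
{
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}

/* kernel/jump_label.c (abridged): every static key update runs this
 * check first, so the early static_branch_disable() above trips it. */
#define STATIC_KEY_CHECK_USE() \
	WARN(!static_key_initialized, \
	     "%s used before call to jump_label_init", __func__)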

IMO, adding a hook in start_kernel() is not elegant. It affects all
the other arches and adds complexity to generic code for what is an
x86-specific problem.

I like your original method, so I tried to fix it by moving
native_pv_lock_init() from native_smp_prepare_boot_cpu() to
native_smp_prepare_cpus(), which runs long after jump_label_init();
see the sketch below.
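
The ordering that makes this work, roughly (call chain, not literal
source):

start_kernel()
    smp_prepare_boot_cpu()           /* too early: static keys not yet initialized */
    ...
    jump_label_init()                /* static keys usable from here on */
    ...
    rest_init() -> kernel_init()
        kernel_init_freeable()
            smp_init()
                smp_prepare_cpus()   /* -> native_smp_prepare_cpus(), safely late */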

I hope it's useful to you.

Thanks,
dou.

==================<
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index aed1460..6b1335a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1323,6 +1323,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	pr_info("CPU0: ");
 	print_cpu_info(&cpu_data(0));
 
+	native_pv_lock_init();
+
 	uv_system_init();
 
 	set_mtrr_aps_delayed_init();
@@ -1350,7 +1352,6 @@ void __init native_smp_prepare_boot_cpu(void)
 	/* already set me in cpu_online_mask in boot_cpu_init() */
 	cpumask_set_cpu(me, cpu_callout_mask);
 	cpu_set_state_online(me);
-	native_pv_lock_init();
 }
 
 void __init native_smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 5147140..570b2bc 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -236,6 +236,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
 		xen_raw_printk(m);
 		panic(m);
 	}
+	native_pv_lock_init();
+
 	xen_init_lock_cpu(0);
 
 	smp_store_boot_cpu_info();
---
 arch/x86/include/asm/qspinlock.h |  5 +----
 arch/x86/kernel/smpboot.c        |  1 -
 include/asm-generic/qspinlock.h  |  6 ++++++
 include/linux/spinlock.h         |  4 ++++
 include/linux/spinlock_up.h      |  4 ++++
 init/main.c                      |  2 ++
 kernel/locking/spinlock.c        |  7 +++++++
 7 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 308dfd0714c7..a53ef9ed0dc1 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -49,6 +49,7 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 #ifdef CONFIG_PARAVIRT
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
+#define native_pv_lock_init native_pv_lock_init
 void native_pv_lock_init(void) __init;
 
 #define virt_spin_lock virt_spin_lock
@@ -70,10 +71,6 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
 
 	return true;
 }
-#else
-static inline void native_pv_lock_init(void)
-{
-}
 #endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 361f91674ce5..55a3121dd479 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1385,7 +1385,6 @@ void __init native_smp_prepare_boot_cpu(void)
 	/* already set me in cpu_online_mask in boot_cpu_init() */
 	cpumask_set_cpu(me, cpu_callout_mask);
 	cpu_set_state_online(me);
-	native_pv_lock_init();
 }
 
 void __init native_smp_cpus_done(unsigned int max_cpus)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 66260777d644..42784a353401 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,6 +111,12 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 }
 #endif
 
+#ifndef native_pv_lock_init
+static __always_inline void native_pv_lock_init(void)
+{
+}
+#endif
+
 /*
  * Remapping spinlock architecture specific functions to the corresponding
  * queued spinlock functions.
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 69e079c5ff98..6654ff285e5c 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -420,4 +420,8 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+#ifdef CONFIG_SMP
+void lock_init(void) __init;
+#endif
+
 #endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 612fb530af41..bc4787900ad7 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -80,4 +80,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define arch_read_can_lock(lock)	(((void)(lock), 1))
 #define arch_write_can_lock(lock)	(((void)(lock), 1))
 
+static inline void lock_init(void)
+{
+}
+
 #endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/init/main.c b/init/main.c
index 0ee9c6866ada..e5c9f9bcd311 100644
--- a/init/main.c
+++ b/init/main.c
@@ -88,6 +88,7 @@
 #include <linux/io.h>
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
+#include <linux/spinlock.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -567,6 +568,7 @@ asmlinkage __visible void __init start_kernel(void)
 	sort_main_extable();
 	trap_init();
 	mm_init();
+	lock_init();
 
 	ftrace_init();
 
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 4b082b5cac9e..f086e444c2ac 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -397,3 +397,10 @@ notrace int in_lock_functions(unsigned long addr)
 	&& addr < (unsigned long)__lock_text_end;
 }
 EXPORT_SYMBOL(in_lock_functions);
+
+void __init lock_init(void)
+{
+#ifdef CONFIG_QUEUED_SPINLOCKS
+	native_pv_lock_init();
+#endif
+}
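
For completeness, with the lock_init() hook the relevant boot ordering
becomes, roughly:

start_kernel()
    smp_prepare_boot_cpu()    /* no longer touches the static key */
    ...
    jump_label_init()
    ...
    mm_init()
    lock_init()               /* -> native_pv_lock_init() on x86 */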