[patch V2 22/24] jump_label: Provide static_key_slow_inc_cpuslocked()

From: Thomas Gleixner
Date: Tue Apr 18 2017 - 15:52:41 EST


Provide static_key_slow_inc_cpuslocked(), a variant of
static_key_slow_inc() which does not take the CPU hotplug lock itself
but requires the caller to already hold it (via get_online_cpus()). To
keep the dec side symmetric, the hotplug locking is likewise moved out
of __static_key_slow_dec() into its callers.
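
For illustration only (not part of the patch), a minimal usage sketch;
the key and the enable function below are made up:

	static struct static_key my_key = STATIC_KEY_INIT_FALSE;

	static void my_feature_enable(void)
	{
		get_online_cpus();
		/* per-CPU state set up here cannot race with hotplug */
		static_key_slow_inc_cpuslocked(&my_key);
		put_online_cpus();
	}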

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: jbaron@xxxxxxxxxx
Cc: bigeasy@xxxxxxxxxxxxx
Cc: rostedt@xxxxxxxxxxx
Link: http://lkml.kernel.org/r/20170418103422.636958338@xxxxxxxxxxxxx
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

---
 include/linux/jump_label.h |    3 +++
 kernel/jump_label.c        |   21 +++++++++++++++++----
 2 files changed, 20 insertions(+), 4 deletions(-)

--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -158,6 +158,7 @@ extern void arch_jump_label_transform_st
 					     enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
 extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_inc_cpuslocked(struct static_key *key);
 extern void static_key_slow_dec(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 extern int static_key_count(struct static_key *key);
@@ -213,6 +214,8 @@ static inline void static_key_slow_inc(s
 	atomic_inc(&key->enabled);
 }
 
+#define static_key_slow_inc_cpuslocked	static_key_slow_inc
+
 static inline void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -101,7 +101,7 @@ void static_key_disable(struct static_ke
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-void static_key_slow_inc(struct static_key *key)
+void __static_key_slow_inc(struct static_key *key)
 {
 	int v, v1;
 
@@ -130,7 +130,6 @@ void static_key_slow_inc(struct static_k
 	 * the all CPUs, for that to be serialized against CPU hot-plug
 	 * we need to avoid CPUs coming online.
 	 */
-	get_online_cpus();
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
 		atomic_set(&key->enabled, -1);
@@ -140,10 +139,22 @@ void static_key_slow_inc(struct static_k
 		atomic_inc(&key->enabled);
 	}
 	jump_label_unlock();
+}
+
+void static_key_slow_inc(struct static_key *key)
+{
+	get_online_cpus();
+	__static_key_slow_inc(key);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
+void static_key_slow_inc_cpuslocked(struct static_key *key)
+{
+	__static_key_slow_inc(key);
+}
+EXPORT_SYMBOL_GPL(static_key_slow_inc_cpuslocked);
+
 static void __static_key_slow_dec(struct static_key *key,
 				  unsigned long rate_limit, struct delayed_work *work)
 {
@@ -154,7 +165,6 @@ static void __static_key_slow_dec(struct
 	 * returns is unbalanced, because all other static_key_slow_inc()
 	 * instances block while the update is in progress.
 	 */
-	get_online_cpus();
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
@@ -168,20 +178,23 @@ static void __static_key_slow_dec(struct
 		jump_label_update(key);
 	}
 	jump_label_unlock();
-	put_online_cpus();
 }
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
 	struct static_key_deferred *key =
 		container_of(work, struct static_key_deferred, work.work);
+	get_online_cpus();
 	__static_key_slow_dec(&key->key, 0, NULL);
+	put_online_cpus();
 }
 
 void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
+	get_online_cpus();
 	__static_key_slow_dec(key, 0, NULL);
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
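
As a further, hypothetical illustration of where the new variant is
needed (not from this series): a CPU hotplug callback already runs with
hotplug exclusion provided by the hotplug core, so calling
static_key_slow_inc() from it would take the hotplug lock recursively.
The sketch below assumes a callback registered via cpuhp_setup_state()
and reuses the made-up key from above:

	static int my_feature_online(unsigned int cpu)
	{
		/* hotplug exclusion is already provided here */
		static_key_slow_inc_cpuslocked(&my_key);
		return 0;
	}

	/* ... */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "myfeature:online",
			  my_feature_online, NULL);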