Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()

From: Peter Zijlstra
Date: Tue Apr 18 2017 - 09:04:05 EST


On Tue, Apr 18, 2017 at 12:32:15PM +0200, Peter Zijlstra wrote:
> XXX maybe add an assertion that it is taken.

Compile tested only..
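
For illustration only (not part of the patch): the intended contract is that
callers of the _nohp variants pin CPU hotplug themselves, and the new
assertion then verifies under lockdep that this actually happened. A rough
sketch, with made-up names (my_key, my_enable_path):

  #include <linux/cpu.h>
  #include <linux/jump_label.h>

  /* made-up example key, illustration only */
  static struct static_key my_key = STATIC_KEY_INIT_FALSE;

  static void my_enable_path(void)
  {
          get_online_cpus();                  /* pin CPU hotplug */
          static_key_slow_inc_nohp(&my_key);  /* asserts the hotplug lock is held */
          put_online_cpus();
  }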

---
Subject: hotplug,lockdep: Verify the hotplug lock assumptions
From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Date: Tue Apr 18 14:49:09 CEST 2017

With the recent work to avoid recursive hotplug read locks, various code
paths grew the assumption that the hotplug lock is held.

Add lockdep assertions to verify these assumptions, so future violations
are caught instead of silently producing broken code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 include/linux/cpu.h   |    2 ++
 kernel/cpu.c          |    7 +++++++
 kernel/jump_label.c   |    2 ++
 kernel/padata.c       |    3 +--
 kernel/stop_machine.c |    2 ++
 5 files changed, 14 insertions(+), 2 deletions(-)

--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -105,6 +105,7 @@ extern void cpu_hotplug_begin(void);
 extern void cpu_hotplug_done(void);
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
+extern void lockdep_assert_hotplug_held(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
 void clear_tasks_mm_cpumask(int cpu);
@@ -118,6 +119,7 @@ static inline void cpu_hotplug_done(void
 #define put_online_cpus()       do { } while (0)
 #define cpu_hotplug_disable()   do { } while (0)
 #define cpu_hotplug_enable()    do { } while (0)
+static inline void lockdep_assert_hotplug_held(void) {}
 #endif  /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM_SLEEP_SMP
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -229,6 +229,11 @@ void cpu_hotplug_done(void)
         percpu_up_write(&cpu_hotplug_lock);
 }
 
+void lockdep_assert_hotplug_held(void)
+{
+        percpu_rwsem_assert_held(&cpu_hotplug_lock);
+}
+
 /*
  * Wait for currently running CPU hotplug operations to complete (if any) and
  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
@@ -1398,6 +1403,8 @@ int __cpuhp_setup_state_locked(enum cpuh
         int cpu, ret = 0;
         bool dynstate;
 
+        lockdep_assert_hotplug_held();
+
         if (cpuhp_cb_check(state) || !name)
                 return -EINVAL;
 
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -130,6 +130,7 @@ void __static_key_slow_inc(struct static
          * the all CPUs, for that to be serialized against CPU hot-plug
          * we need to avoid CPUs coming online.
          */
+        lockdep_assert_hotplug_held();
         jump_label_lock();
         if (atomic_read(&key->enabled) == 0) {
                 atomic_set(&key->enabled, -1);
@@ -158,6 +159,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc_no
 static void __static_key_slow_dec(struct static_key *key,
                 unsigned long rate_limit, struct delayed_work *work)
 {
+        lockdep_assert_hotplug_held();
         /*
          * The negative count check is valid even when a negative
          * key->enabled is in use by static_key_slow_inc(); a
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1008,11 +1008,10 @@ static struct padata_instance *padata_al
  * parallel workers.
  *
  * @wq: workqueue to use for the allocated padata instance
- *
- * Must be called from a get_online_cpus() protected region
  */
 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
 {
+        lockdep_assert_hotplug_held();
         return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
 }
 EXPORT_SYMBOL(padata_alloc_possible);
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -561,6 +561,8 @@ int stop_machine_locked(cpu_stop_fn_t fn
                 .active_cpus = cpus,
         };
 
+        lockdep_assert_hotplug_held();
+
         if (!stop_machine_initialized) {
                 /*
                  * Handle the case where stop_machine() is called