Re: [PATCH 3/4] workqueue: Tag bound workers with KTHREAD_IS_PER_CPU

From: Peter Zijlstra
Date: Thu Jan 14 2021 - 10:37:07 EST


On Thu, Jan 14, 2021 at 01:21:26PM +0000, Valentin Schneider wrote:
> On 14/01/21 14:12, Peter Zijlstra wrote:

> > -		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
> > -						  pool->attrs->cpumask) < 0);
> > +		kthread_park(worker->task);
>
> Don't we still need an affinity change here, to undo what was done in
> unbind_workers()?
>
> Would something like
>
> __kthread_bind_mask(worker->task, pool->attrs->cpumask, TASK_PARKED)
>
> even work?
>
> >  		kthread_set_per_cpu(worker->task, true);
> > +		kthread_unpark(worker->task);

Nope, look at what kthread_unpark() does; what was missing was
assigning kthread->cpu, though.
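
For reference, kthread_unpark() already rebinds a per-CPU kthread to
kthread->cpu before waking it; roughly (kernel/kthread.c in this tree):

void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}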

The below actually seems to work. The rescuer is still a problem, though.

---
 include/linux/kthread.h |  2 +-
 kernel/kthread.c        | 14 ++++++++------
 kernel/sched/core.c     | 19 ++++++++++++++++++-
 kernel/smpboot.c        |  2 +-
 kernel/workqueue.c      | 22 +++++++++++++---------
 5 files changed, 41 insertions(+), 18 deletions(-)

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index fdd5a52e35d8..2484ed97e72f 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -33,7 +33,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 					  unsigned int cpu,
 					  const char *namefmt);
 
-void kthread_set_per_cpu(struct task_struct *k, bool set);
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
 bool kthread_is_per_cpu(struct task_struct *k);
 
 /**
diff --git a/kernel/kthread.c b/kernel/kthread.c
index bead90275d2b..e0e4a423f184 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -497,19 +497,21 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 	return p;
 }
 
-void kthread_set_per_cpu(struct task_struct *k, bool set)
+void kthread_set_per_cpu(struct task_struct *k, int cpu)
 {
 	struct kthread *kthread = to_kthread(k);
 	if (!kthread)
 		return;
 
-	if (set) {
-		WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
-		WARN_ON_ONCE(k->nr_cpus_allowed != 1);
-		set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
-	} else {
+	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+	if (cpu < 0) {
 		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+		return;
 	}
+
+	kthread->cpu = cpu;
+	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 }
 
 bool kthread_is_per_cpu(struct task_struct *k)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 60b257d845fa..c2fdeeb6af2b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7589,7 +7589,24 @@ int sched_cpu_dying(unsigned int cpu)
 	sched_tick_stop(cpu);
 
 	rq_lock_irqsave(rq, &rf);
-	BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
+	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+		struct task_struct *g, *p;
+
+		pr_crit("CPU%d nr_running=%d\n", cpu, rq->nr_running);
+		rcu_read_lock();
+		for_each_process_thread(g, p) {
+			if (task_cpu(p) != cpu)
+				continue;
+
+			if (!task_on_rq_queued(p))
+				continue;
+
+			pr_crit("\tp=%s\n", p->comm);
+		}
+		rcu_read_unlock();
+
+		WARN_ON_ONCE(1);
+	}
 	rq_unlock_irqrestore(rq, &rf);
 
 	calc_load_migrate(rq);
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index b0abe575a524..f25208e8df83 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -188,7 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 		kfree(td);
 		return PTR_ERR(tsk);
 	}
-	kthread_set_per_cpu(tsk, true);
+	kthread_set_per_cpu(tsk, cpu);
 	/*
 	 * Park the thread so that it could start right on the CPU
 	 * when it is available.
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ec0771e4a3fb..b518fd67a792 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1862,7 +1862,7 @@ static void worker_attach_to_pool(struct worker *worker,
 	if (pool->flags & POOL_DISASSOCIATED)
 		worker->flags |= WORKER_UNBOUND;
 	else
-		kthread_set_per_cpu(worker->task, true);
+		kthread_set_per_cpu(worker->task, pool->cpu);
 
 	list_add_tail(&worker->node, &pool->workers);
 	worker->pool = pool;
@@ -1885,7 +1885,7 @@ static void worker_detach_from_pool(struct worker *worker)
 
 	mutex_lock(&wq_pool_attach_mutex);
 
-	kthread_set_per_cpu(worker->task, false);
+	kthread_set_per_cpu(worker->task, -1);
 	list_del(&worker->node);
 	worker->pool = NULL;
 
@@ -2371,6 +2371,7 @@ static int worker_thread(void *__worker)
 	/* tell the scheduler that this is a workqueue worker */
 	set_pf_worker(true);
 woke_up:
+	kthread_parkme();
 	raw_spin_lock_irq(&pool->lock);
 
 	/* am I supposed to die? */
@@ -2428,7 +2429,7 @@ static int worker_thread(void *__worker)
 			move_linked_works(work, &worker->scheduled, NULL);
 			process_scheduled_works(worker);
 		}
-	} while (keep_working(pool));
+	} while (keep_working(pool) && !kthread_should_park());
 
 	worker_set_flags(worker, WORKER_PREP);
 sleep:
@@ -2440,9 +2441,12 @@ static int worker_thread(void *__worker)
 	 * event.
 	 */
 	worker_enter_idle(worker);
-	__set_current_state(TASK_IDLE);
+	set_current_state(TASK_IDLE);
 	raw_spin_unlock_irq(&pool->lock);
-	schedule();
+
+	if (!kthread_should_park())
+		schedule();
+
 	goto woke_up;
 }
 
@@ -4923,7 +4927,7 @@ static void unbind_workers(int cpu)
 		raw_spin_unlock_irq(&pool->lock);
 
 		for_each_pool_worker(worker, pool) {
-			kthread_set_per_cpu(worker->task, false);
+			kthread_set_per_cpu(worker->task, -1);
 			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 		}
 
@@ -4978,9 +4982,9 @@ static void rebind_workers(struct worker_pool *pool)
 	 * from CPU_ONLINE, the following shouldn't fail.
 	 */
 	for_each_pool_worker(worker, pool) {
-		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
-						  pool->attrs->cpumask) < 0);
-		kthread_set_per_cpu(worker->task, true);
+		WARN_ON_ONCE(kthread_park(worker->task) < 0);
+		kthread_set_per_cpu(worker->task, pool->cpu);
+		kthread_unpark(worker->task);
 	}
 
 	raw_spin_lock_irq(&pool->lock);