sched: Fix stop_one_cpu_nowait() vs hotplug
Kuyo reported sporadic failures on a sched_setaffinity() vs CPU
hotplug stress-test -- notably affine_move_task() remains stuck in
wait_for_completion(), leading to a hung-task detector warning.

Specifically, it was reported that stop_one_cpu_nowait(.fn =
migration_cpu_stop) returns false -- this stopper is responsible for
the matching complete().

The race scenario is:

	CPU0					CPU1

						// doing _cpu_down()

  __set_cpus_allowed_ptr()
    task_rq_lock();
						takedown_cpu()
						  stop_machine_cpuslocked(take_cpu_down..)

						<PREEMPT: cpu_stopper_thread()
						  MULTI_STOP_PREPARE
						  ...
    __set_cpus_allowed_ptr_locked()
      affine_move_task()
        task_rq_unlock();

  <PREEMPT: cpu_stopper_thread()\>
    ack_state()
						  MULTI_STOP_RUN
						    take_cpu_down()
						      __cpu_disable();
						      stop_machine_park();
							stopper->enabled = false;
						 /\>
   /\>
	stop_one_cpu_nowait(.fn = migration_cpu_stop);
	  if (stopper->enabled)			// false!!!

That is, by doing stop_one_cpu_nowait() after dropping rq-lock, the
stopper thread gets a chance to preempt and allows the cpu-down for
the target CPU to complete.

OTOH, since stop_one_cpu_nowait() / cpu_stop_queue_work() needs to
issue a wakeup, it must not be run under the scheduler locks.

Solve this apparent contradiction by keeping preemption disabled over
the unlock + queue_stopper combination:

	preempt_disable();
	task_rq_unlock(...);
	if (!stop_pending)
		stop_one_cpu_nowait(...)
	preempt_enable();

This respects the lock ordering constraints while still avoiding the
above race. That is, if we find the CPU is online under rq-lock, the
targeted stop_one_cpu_nowait() must succeed.

Apply this pattern to all similar stop_one_cpu_nowait() invocations.

Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
Reported-by: "Kuyo Chang (張建文)" <Kuyo.Chang@mediatek.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: "Kuyo Chang (張建文)" <Kuyo.Chang@mediatek.com>
Link: https://lkml.kernel.org/r/20231010200442.GA16515@noisy.programming.kicks-ass.net
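Note: the queue attempt can fail because cpu_stop_queue_work() only
accepts work while the target CPU's stopper thread is enabled, and
stop_machine_park() clears that flag during cpu-down. The sketch below
is a loose userspace model of that check (names and types simplified,
not the kernel source):

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified model of the per-CPU stopper state: work is only
	 * accepted while the stopper thread has not been parked. */
	struct stopper_model {
		bool enabled;	/* cleared by stop_machine_park() */
	};

	/* Models cpu_stop_queue_work(): once the stopper is parked the
	 * queue attempt returns false, so migration_cpu_stop() never
	 * runs and the matching complete() never happens. */
	static bool queue_stop_work(struct stopper_model *stopper)
	{
		if (!stopper->enabled)
			return false;
		/* ... enqueue the work, wake the stopper thread ... */
		return true;
	}

	int main(void)
	{
		struct stopper_model stopper = { .enabled = true };

		/* Preemptible window between task_rq_unlock() and the
		 * queue: cpu-down can run here and park the stopper. */
		stopper.enabled = false;

		printf("queued: %d\n", queue_stop_work(&stopper)); /* 0 */
		return 0;
	}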
commit f0498d2a54
parent 0c2924079f
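Note: disabling preemption closes the race because the per-CPU stopper
thread can only park itself (clearing stopper->enabled) once it gets
to run on the queueing CPU, which it cannot do while that CPU is
non-preemptible. The pthread sketch below is only an analogy -- it
models the non-preemptible region as a mutex that the (modeled)
cpu-down path must also acquire; it is not how the kernel implements
preemption:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* "Preemption disabled" modeled as a lock the parking path
	 * must take before it may disable the stopper. */
	static pthread_mutex_t no_preempt = PTHREAD_MUTEX_INITIALIZER;
	static bool stopper_enabled = true;

	/* Models take_cpu_down()/stop_machine_park(): it cannot make
	 * progress until the queueing CPU is preemptible again. */
	static void *cpu_down_model(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&no_preempt);
		stopper_enabled = false;
		pthread_mutex_unlock(&no_preempt);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		bool queued;

		pthread_mutex_lock(&no_preempt);	/* preempt_disable() */
		pthread_create(&t, NULL, cpu_down_model, NULL);
		/* task_rq_unlock() would go here: dropping the rq-lock
		 * is now safe, cpu-down still cannot complete. */
		queued = stopper_enabled;		/* always true */
		pthread_mutex_unlock(&no_preempt);	/* preempt_enable() */

		pthread_join(t, NULL);
		printf("queued: %s\n", queued ? "true" : "false");
		return 0;
	}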
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2645,9 +2645,11 @@ static int migration_cpu_stop(void *data)
 			 * it.
 			 */
 			WARN_ON_ONCE(!pending->stop_pending);
+			preempt_disable();
 			task_rq_unlock(rq, p, &rf);
 			stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
 					    &pending->arg, &pending->stop_work);
+			preempt_enable();
 			return 0;
 		}
 out:
@@ -2967,12 +2969,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 			complete = true;
 		}
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (push_task) {
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 					    p, &rq->push_work);
 		}
+		preempt_enable();
 
 		if (complete)
 			complete_all(&pending->done);
@@ -3038,12 +3041,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 		if (flags & SCA_MIGRATE_ENABLE)
 			p->migration_flags &= ~MDF_PUSH;
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (!stop_pending) {
 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
 					    &pending->arg, &pending->stop_work);
 		}
+		preempt_enable();
 
 		if (flags & SCA_MIGRATE_ENABLE)
 			return 0;
@@ -9421,9 +9425,11 @@ static void balance_push(struct rq *rq)
 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
 	 * Both preemption and IRQs are still disabled.
 	 */
+	preempt_disable();
 	raw_spin_rq_unlock(rq);
 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
 			    this_cpu_ptr(&push_work));
+	preempt_enable();
 	/*
 	 * At this point need_resched() is true and we'll take the loop in
 	 * schedule(). The next pick is obviously going to be the stop task
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2420,9 +2420,11 @@ skip:
 	double_unlock_balance(this_rq, src_rq);
 
 	if (push_task) {
+		preempt_disable();
 		raw_spin_rq_unlock(this_rq);
 		stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 				    push_task, &src_rq->push_work);
+		preempt_enable();
 		raw_spin_rq_lock(this_rq);
 	}
 }
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11254,13 +11254,15 @@ more_balance:
 				busiest->push_cpu = this_cpu;
 				active_balance = 1;
 			}
-			raw_spin_rq_unlock_irqrestore(busiest, flags);
 
+			preempt_disable();
+			raw_spin_rq_unlock_irqrestore(busiest, flags);
 			if (active_balance) {
 				stop_one_cpu_nowait(cpu_of(busiest),
 					active_load_balance_cpu_stop, busiest,
 					&busiest->active_balance_work);
 			}
+			preempt_enable();
 		}
 	} else {
 		sd->nr_balance_failed = 0;
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2063,9 +2063,11 @@ retry:
 	 */
 	push_task = get_push_task(rq);
 	if (push_task) {
+		preempt_disable();
 		raw_spin_rq_unlock(rq);
 		stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 				    push_task, &rq->push_work);
+		preempt_enable();
 		raw_spin_rq_lock(rq);
 	}
 
@@ -2402,9 +2404,11 @@ skip:
 	double_unlock_balance(this_rq, src_rq);
 
 	if (push_task) {
+		preempt_disable();
 		raw_spin_rq_unlock(this_rq);
 		stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 				    push_task, &src_rq->push_work);
+		preempt_enable();
 		raw_spin_rq_lock(this_rq);
 	}
 }