sched: Fix race in task_call_func()
There is a very narrow race between schedule() and task_call_func().

  CPU0						CPU1

  __schedule()
    rq_lock();
    prev_state = READ_ONCE(prev->__state);
    if (... && prev_state) {
      deactivate_task(rq, prev, ...)
        prev->on_rq = 0;

						task_call_func()
						  raw_spin_lock_irqsave(p->pi_lock);
						  state = READ_ONCE(p->__state);
						  smp_rmb();
						  if (... || p->on_rq) // false!!!
						  rq = __task_rq_lock()

						  ret = func();

    next = pick_next_task();
    rq = context_switch(prev, next)
      prepare_lock_switch()
        spin_release(&__rq_lockp(rq)->dep_map...)

So while the task is on its way out, it still holds rq->lock for a
little while, and right then task_call_func() comes in and figures it
doesn't need rq->lock anymore (because the task is already dequeued --
but still running there) and then the __set_task_frozen() thing observes
it's holding rq->lock and yells murder.

Avoid this by waiting for p->on_cpu to get cleared, which guarantees
the task is fully finished on the old CPU.

( While arguably the fixes tag is 'wrong' -- none of the previous
  task_call_func() users appears to care for this case. )

Fixes: f5d39b020809 ("freezer,sched: Rewrite core freezer logic")
Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://lkml.kernel.org/r/Y1kdRNNfUeAU+FNl@hirez.programming.kicks-ass.net
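[Editor's note] The window above can be modeled in plain userspace C. The sketch below is illustrative only: on_rq and on_cpu stand in for the kernel's task fields, scheduler_side()/caller_side() are made-up names, and C11 atomics replace the real barriers and locks. It shows why waiting for on_cpu to clear closes the race: after dequeue (on_rq = 0) the task keeps running for a moment, and only the on_cpu wait orders func() after that.

  /* Minimal userspace model of the race -- NOT kernel code. */
  #include <stdatomic.h>
  #include <stdio.h>
  #include <pthread.h>
  #include <unistd.h>

  static atomic_int on_rq  = 1;	/* task queued on a runqueue */
  static atomic_int on_cpu = 1;	/* task still running on a CPU */

  /* "CPU1": __schedule() dequeues the task, but the task keeps
   * running (and keeps "rq->lock") for a while before on_cpu drops. */
  static void *scheduler_side(void *arg)
  {
  	(void)arg;
  	atomic_store_explicit(&on_rq, 0, memory_order_relaxed);
  	usleep(1000);	/* the window: dequeued but still on the CPU */
  	atomic_store_explicit(&on_cpu, 0, memory_order_release);
  	return NULL;
  }

  /* "CPU0": task_call_func() with the fix applied. */
  static void *caller_side(void *arg)
  {
  	(void)arg;
  	/* Observe the dequeue (p->on_rq == 0)... */
  	while (atomic_load_explicit(&on_rq, memory_order_acquire))
  		;
  	/* ...here the unfixed code would already call func(), even
  	 * though the task may still be running. The fix mirrors
  	 * smp_cond_load_acquire(&p->on_cpu, !VAL): */
  	while (atomic_load_explicit(&on_cpu, memory_order_acquire))
  		;
  	printf("func() runs, on_cpu == %d\n",
  	       atomic_load_explicit(&on_cpu, memory_order_relaxed));
  	return NULL;
  }

  int main(void)
  {
  	pthread_t t1, t2;

  	pthread_create(&t1, NULL, scheduler_side, NULL);
  	pthread_create(&t2, NULL, caller_side, NULL);
  	pthread_join(t1, NULL);
  	pthread_join(t2, NULL);
  	return 0;	/* always prints on_cpu == 0 with the wait in place */
  }

Build with "gcc -pthread model.c"; without the second spin loop the printf could report on_cpu == 1, which is exactly the state __set_task_frozen() complained about.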
commit 91dabf33ae
parent 448dca8c88
@@ -4200,6 +4200,40 @@ out:
 	return success;
 }
 
+static bool __task_needs_rq_lock(struct task_struct *p)
+{
+	unsigned int state = READ_ONCE(p->__state);
+
+	/*
+	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
+	 * the task is blocked. Make sure to check @state since ttwu() can drop
+	 * locks at the end, see ttwu_queue_wakelist().
+	 */
+	if (state == TASK_RUNNING || state == TASK_WAKING)
+		return true;
+
+	/*
+	 * Ensure we load p->on_rq after p->__state, otherwise it would be
+	 * possible to, falsely, observe p->on_rq == 0.
+	 *
+	 * See try_to_wake_up() for a longer comment.
+	 */
+	smp_rmb();
+	if (p->on_rq)
+		return true;
+
+#ifdef CONFIG_SMP
+	/*
+	 * Ensure the task has finished __schedule() and will not be referenced
+	 * anymore. Again, see try_to_wake_up() for a longer comment.
+	 */
+	smp_rmb();
+	smp_cond_load_acquire(&p->on_cpu, !VAL);
+#endif
+
+	return false;
+}
+
 /**
  * task_call_func - Invoke a function on task in fixed state
  * @p: Process for which the function is to be invoked, can be @current.
@@ -4217,28 +4251,12 @@ out:
 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
 {
 	struct rq *rq = NULL;
-	unsigned int state;
 	struct rq_flags rf;
 	int ret;
 
 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 
-	state = READ_ONCE(p->__state);
-
-	/*
-	 * Ensure we load p->on_rq after p->__state, otherwise it would be
-	 * possible to, falsely, observe p->on_rq == 0.
-	 *
-	 * See try_to_wake_up() for a longer comment.
-	 */
-	smp_rmb();
-
-	/*
-	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
-	 * the task is blocked. Make sure to check @state since ttwu() can drop
-	 * locks at the end, see ttwu_queue_wakelist().
-	 */
-	if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq)
+	if (__task_needs_rq_lock(p))
 		rq = __task_rq_lock(p, &rf);
 
 	/*
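[Editor's note] The key primitive in the new code is smp_cond_load_acquire(&p->on_cpu, !VAL): it spins until the loaded value of p->on_cpu makes the condition !VAL true, then imposes ACQUIRE ordering, so everything func() does is ordered after the task has genuinely left the old CPU. Roughly, and only as a simplified sketch of the generic fallback (the real macro lives in asm-generic/barrier.h and architectures may override it with cheaper wait instructions):

  /* Simplified expansion of smp_cond_load_acquire(&p->on_cpu, !VAL);
   * illustrative only, not the verbatim kernel macro. */
  unsigned int VAL;

  for (;;) {
  	VAL = READ_ONCE(p->on_cpu);	/* reload on every iteration */
  	if (!VAL)			/* caller-supplied condition */
  		break;
  	cpu_relax();			/* polite busy-wait hint */
  }
  smp_acquire__after_ctrl_dep();	/* upgrade the control dependency
  					 * to ACQUIRE ordering */

This wait is what the commit message means by "waiting for p->on_cpu to get cleared": p->on_cpu is only cleared in finish_task(), after the outgoing task has released rq->lock, so a task_call_func() callback can no longer observe the task while it still holds that lock.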