sched: Simplify yield_to()
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 92c2ec5bc1
commit 7a50f76674
@@ -8888,55 +8888,46 @@ int __sched yield_to(struct task_struct *p, bool preempt)
 {
 	struct task_struct *curr = current;
 	struct rq *rq, *p_rq;
-	unsigned long flags;
 	int yielded = 0;
 
-	local_irq_save(flags);
-	rq = this_rq();
+	scoped_guard (irqsave) {
+		rq = this_rq();
 
 again:
-	p_rq = task_rq(p);
-	/*
-	 * If we're the only runnable task on the rq and target rq also
-	 * has only one task, there's absolutely no point in yielding.
-	 */
-	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
-		yielded = -ESRCH;
-		goto out_irq;
-	}
-
-	double_rq_lock(rq, p_rq);
-	if (task_rq(p) != p_rq) {
-		double_rq_unlock(rq, p_rq);
-		goto again;
-	}
-
-	if (!curr->sched_class->yield_to_task)
-		goto out_unlock;
-
-	if (curr->sched_class != p->sched_class)
-		goto out_unlock;
-
-	if (task_on_cpu(p_rq, p) || !task_is_running(p))
-		goto out_unlock;
-
-	yielded = curr->sched_class->yield_to_task(rq, p);
-	if (yielded) {
-		schedstat_inc(rq->yld_count);
+		p_rq = task_rq(p);
 		/*
-		 * Make p's CPU reschedule; pick_next_entity takes care of
-		 * fairness.
+		 * If we're the only runnable task on the rq and target rq also
+		 * has only one task, there's absolutely no point in yielding.
 		 */
-		if (preempt && rq != p_rq)
-			resched_curr(p_rq);
+		if (rq->nr_running == 1 && p_rq->nr_running == 1)
+			return -ESRCH;
+
+		guard(double_rq_lock)(rq, p_rq);
+		if (task_rq(p) != p_rq)
+			goto again;
+
+		if (!curr->sched_class->yield_to_task)
+			return 0;
+
+		if (curr->sched_class != p->sched_class)
+			return 0;
+
+		if (task_on_cpu(p_rq, p) || !task_is_running(p))
+			return 0;
+
+		yielded = curr->sched_class->yield_to_task(rq, p);
+		if (yielded) {
+			schedstat_inc(rq->yld_count);
+			/*
+			 * Make p's CPU reschedule; pick_next_entity
+			 * takes care of fairness.
+			 */
+			if (preempt && rq != p_rq)
+				resched_curr(p_rq);
+		}
 	}
 
-out_unlock:
-	double_rq_unlock(rq, p_rq);
-out_irq:
-	local_irq_restore(flags);
-
-	if (yielded > 0)
+	if (yielded)
 		schedule();
 
 	return yielded;
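The guard helpers used above come from the kernel's scope-based cleanup infrastructure in <linux/cleanup.h>, which builds on the compiler's __attribute__((cleanup)). The sketch below is a minimal userspace illustration of that idea, not the kernel's actual macros: the toy_lock, toy_unlock and SIMPLE_GUARD names are invented for the example. It shows why the early returns in the new yield_to() are safe: the cleanup routine runs automatically whenever the guard variable goes out of scope, so explicit out_unlock/out_irq style unwinding labels are no longer needed.

#include <stdio.h>
#include <pthread.h>

/* Toy lock standing in for an rq lock; purely illustrative. */
static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cleanup callback run by the compiler when the guard variable leaves
 * scope -- on normal exit and on early return alike.
 */
static void toy_unlock(pthread_mutex_t **lockp)
{
	if (*lockp)
		pthread_mutex_unlock(*lockp);
}

/* Crude stand-in for guard(): take the lock now, drop it at end of scope. */
#define SIMPLE_GUARD(lock) \
	pthread_mutex_t *__guard __attribute__((cleanup(toy_unlock))) = \
		(pthread_mutex_lock(lock), (lock))

static int do_work(int arg)
{
	SIMPLE_GUARD(&toy_lock);

	if (arg < 0)
		return -1;	/* early return: unlock still happens */

	printf("working on %d\n", arg);
	return 0;		/* normal return: unlock happens here too */
}

int main(void)
{
	do_work(3);
	do_work(-1);
	return 0;
}

In the kernel itself, scoped_guard (irqsave) { ... } and guard(double_rq_lock)(rq, p_rq) follow the same scope-bound acquire/release pattern, which is what lets the goto-based error paths in the old yield_to() collapse into plain return statements.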