sched/fair: Use task_rcu_dereference()
Simplify task_numa_compare()'s task reference magic by using
task_rcu_dereference().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Kirill Tkhai <ktkhai@parallels.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Link: http://lkml.kernel.org/r/20160518195733.GA15914@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit bac7857319
parent 150593bf86
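In the old code, task_numa_compare() had to take dst_rq->lock just to sample
dst_rq->curr safely and pin it with get_task_struct(). task_rcu_dereference()
removes that locking: under a plain rcu_read_lock() it returns the remote
runqueue's current task, or NULL if that task is on its way out, and the
returned pointer stays valid until rcu_read_unlock(). A simplified sketch of
the calling pattern (not verbatim kernel code; inspect() is a hypothetical
stand-in for whatever the caller does with the task):

	struct task_struct *cur;

	rcu_read_lock();
	cur = task_rcu_dereference(&dst_rq->curr);	/* may return NULL */
	if (cur) {
		/*
		 * 'cur' cannot be freed before rcu_read_unlock().  To use it
		 * beyond the RCU section, take a real reference with
		 * get_task_struct(cur) while still inside the section.
		 */
		inspect(cur);
	}
	rcu_read_unlock();
	/* 'cur' must not be dereferenced past this point. */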
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1305,6 +1305,8 @@ static void task_numa_assign(struct task_numa_env *env,
 {
 	if (env->best_task)
 		put_task_struct(env->best_task);
+	if (p)
+		get_task_struct(p);
 
 	env->best_task = p;
 	env->best_imp = imp;
@@ -1372,31 +1374,11 @@ static void task_numa_compare(struct task_numa_env *env,
 	long imp = env->p->numa_group ? groupimp : taskimp;
 	long moveimp = imp;
 	int dist = env->dist;
-	bool assigned = false;
 
 	rcu_read_lock();
-
-	raw_spin_lock_irq(&dst_rq->lock);
-	cur = dst_rq->curr;
-	/*
-	 * No need to move the exiting task or idle task.
-	 */
-	if ((cur->flags & PF_EXITING) || is_idle_task(cur))
+	cur = task_rcu_dereference(&dst_rq->curr);
+	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
 		cur = NULL;
-	else {
-		/*
-		 * The task_struct must be protected here to protect the
-		 * p->numa_faults access in the task_weight since the
-		 * numa_faults could already be freed in the following path:
-		 * finish_task_switch()
-		 *     --> put_task_struct()
-		 *         --> __put_task_struct()
-		 *             --> task_numa_free()
-		 */
-		get_task_struct(cur);
-	}
-
-	raw_spin_unlock_irq(&dst_rq->lock);
 
 	/*
 	 * Because we have preemption enabled we can get migrated around and
@@ -1479,7 +1461,6 @@ balance:
 	 */
 	if (!load_too_imbalanced(src_load, dst_load, env)) {
 		imp = moveimp - 1;
-		put_task_struct(cur);
 		cur = NULL;
 		goto assign;
 	}
@@ -1505,16 +1486,9 @@ balance:
 		env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
 
 assign:
-	assigned = true;
 	task_numa_assign(env, cur, imp);
 unlock:
 	rcu_read_unlock();
-	/*
-	 * The dst_rq->curr isn't assigned. The protection for task_struct is
-	 * finished.
-	 */
-	if (cur && !assigned)
-		put_task_struct(cur);
 }
 
 static void task_numa_find_cpu(struct task_numa_env *env,
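With the reference counting moved into task_numa_assign(), only the task
actually cached as env->best_task is pinned; candidates that lose the
comparison never touch the refcount, which is why the 'assigned' bookkeeping
and the trailing put_task_struct() can go away. Taking the reference inside
task_numa_assign() is safe because it is called from within the caller's RCU
read-side section, so the task_struct is guaranteed to still be live.
Assembled from the first hunk above, the helper now reads roughly as follows
(fields of *env beyond the hunk are omitted):

	static void task_numa_assign(struct task_numa_env *env,
				     struct task_struct *p, long imp)
	{
		if (env->best_task)
			put_task_struct(env->best_task);	/* drop the old best */
		if (p)
			get_task_struct(p);	/* pin the new best; caller holds RCU */

		env->best_task = p;
		env->best_imp = imp;
		/* ... remaining env fields set as in the original function */
	}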