sched: Replace normalize_task() with __sched_setscheduler()
Reduce duplicate logic; normalize_task() is a simplified version of __sched_setscheduler(). Parametrize the difference and collapse. This reduces the number of check_class_changed() sites.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.532642391@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
e3fca9e7cb
commit
dbc7f069b9
@ -3438,7 +3438,7 @@ static bool dl_param_changed(struct task_struct *p,
|
|||||||
|
|
||||||
static int __sched_setscheduler(struct task_struct *p,
|
static int __sched_setscheduler(struct task_struct *p,
|
||||||
const struct sched_attr *attr,
|
const struct sched_attr *attr,
|
||||||
bool user)
|
bool user, bool pi)
|
||||||
{
|
{
|
||||||
int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
|
int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
|
||||||
MAX_RT_PRIO - 1 - attr->sched_priority;
|
MAX_RT_PRIO - 1 - attr->sched_priority;
|
||||||
@ -3624,18 +3624,20 @@ change:
|
|||||||
p->sched_reset_on_fork = reset_on_fork;
|
p->sched_reset_on_fork = reset_on_fork;
|
||||||
oldprio = p->prio;
|
oldprio = p->prio;
|
||||||
|
|
||||||
/*
|
if (pi) {
|
||||||
* Take priority boosted tasks into account. If the new
|
/*
|
||||||
* effective priority is unchanged, we just store the new
|
* Take priority boosted tasks into account. If the new
|
||||||
* normal parameters and do not touch the scheduler class and
|
* effective priority is unchanged, we just store the new
|
||||||
* the runqueue. This will be done when the task deboost
|
* normal parameters and do not touch the scheduler class and
|
||||||
* itself.
|
* the runqueue. This will be done when the task deboost
|
||||||
*/
|
* itself.
|
||||||
new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
|
*/
|
||||||
if (new_effective_prio == oldprio) {
|
new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
|
||||||
__setscheduler_params(p, attr);
|
if (new_effective_prio == oldprio) {
|
||||||
task_rq_unlock(rq, p, &flags);
|
__setscheduler_params(p, attr);
|
||||||
return 0;
|
task_rq_unlock(rq, p, &flags);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
queued = task_on_rq_queued(p);
|
queued = task_on_rq_queued(p);
|
||||||
@ -3646,7 +3648,7 @@ change:
|
|||||||
put_prev_task(rq, p);
|
put_prev_task(rq, p);
|
||||||
|
|
||||||
prev_class = p->sched_class;
|
prev_class = p->sched_class;
|
||||||
__setscheduler(rq, p, attr, true);
|
__setscheduler(rq, p, attr, pi);
|
||||||
|
|
||||||
if (running)
|
if (running)
|
||||||
p->sched_class->set_curr_task(rq);
|
p->sched_class->set_curr_task(rq);
|
||||||
@ -3661,7 +3663,8 @@ change:
|
|||||||
check_class_changed(rq, p, prev_class, oldprio);
|
check_class_changed(rq, p, prev_class, oldprio);
|
||||||
task_rq_unlock(rq, p, &flags);
|
task_rq_unlock(rq, p, &flags);
|
||||||
|
|
||||||
rt_mutex_adjust_pi(p);
|
if (pi)
|
||||||
|
rt_mutex_adjust_pi(p);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -3682,7 +3685,7 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
|
|||||||
attr.sched_policy = policy;
|
attr.sched_policy = policy;
|
||||||
}
|
}
|
||||||
|
|
||||||
return __sched_setscheduler(p, &attr, check);
|
return __sched_setscheduler(p, &attr, check, true);
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
|
* sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
|
||||||
@ -3703,7 +3706,7 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
|
|||||||
|
|
||||||
int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
|
int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
|
||||||
{
|
{
|
||||||
return __sched_setscheduler(p, attr, true);
|
return __sched_setscheduler(p, attr, true, true);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(sched_setattr);
|
EXPORT_SYMBOL_GPL(sched_setattr);
|
||||||
|
|
||||||
@ -7361,32 +7364,12 @@ EXPORT_SYMBOL(___might_sleep);
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_MAGIC_SYSRQ
|
#ifdef CONFIG_MAGIC_SYSRQ
|
||||||
static void normalize_task(struct rq *rq, struct task_struct *p)
|
|
||||||
{
|
|
||||||
const struct sched_class *prev_class = p->sched_class;
|
|
||||||
struct sched_attr attr = {
|
|
||||||
.sched_policy = SCHED_NORMAL,
|
|
||||||
};
|
|
||||||
int old_prio = p->prio;
|
|
||||||
int queued;
|
|
||||||
|
|
||||||
queued = task_on_rq_queued(p);
|
|
||||||
if (queued)
|
|
||||||
dequeue_task(rq, p, 0);
|
|
||||||
__setscheduler(rq, p, &attr, false);
|
|
||||||
if (queued) {
|
|
||||||
enqueue_task(rq, p, 0);
|
|
||||||
resched_curr(rq);
|
|
||||||
}
|
|
||||||
|
|
||||||
check_class_changed(rq, p, prev_class, old_prio);
|
|
||||||
}
|
|
||||||
|
|
||||||
void normalize_rt_tasks(void)
|
void normalize_rt_tasks(void)
|
||||||
{
|
{
|
||||||
struct task_struct *g, *p;
|
struct task_struct *g, *p;
|
||||||
unsigned long flags;
|
struct sched_attr attr = {
|
||||||
struct rq *rq;
|
.sched_policy = SCHED_NORMAL,
|
||||||
|
};
|
||||||
|
|
||||||
read_lock(&tasklist_lock);
|
read_lock(&tasklist_lock);
|
||||||
for_each_process_thread(g, p) {
|
for_each_process_thread(g, p) {
|
||||||
@ -7413,9 +7396,7 @@ void normalize_rt_tasks(void)
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
rq = task_rq_lock(p, &flags);
|
__sched_setscheduler(p, &attr, false, false);
|
||||||
normalize_task(rq, p);
|
|
||||||
task_rq_unlock(rq, p, &flags);
|
|
||||||
}
|
}
|
||||||
read_unlock(&tasklist_lock);
|
read_unlock(&tasklist_lock);
|
||||||
}
|
}
|
||||||
|
Loading…
x
Reference in New Issue
Block a user