sched: Avoid division by zero
Patch a5004278f0525dcb9aa43703ef77bf371ea837cd ("sched: Fix cgroup smp fairness") introduced the possibility of a divide-by-zero because load-balancing is not synchronized between sched_domains. This can cause the state of cpus to change between the first and second loop over the sched domain in tg_shares_up().

Reported-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <1250855934.7538.30.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent: cde7e5ca4e
commit: a8af7246c1
@@ -1522,7 +1522,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
  */
 static void
 update_group_shares_cpu(struct task_group *tg, int cpu,
-			unsigned long sd_shares, unsigned long sd_rq_weight)
+			unsigned long sd_shares, unsigned long sd_rq_weight,
+			unsigned long sd_eff_weight)
 {
 	unsigned long rq_weight;
 	unsigned long shares;
@@ -1535,13 +1536,15 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
+		if (sd_rq_weight == sd_eff_weight)
+			sd_eff_weight += NICE_0_LOAD;
+		sd_rq_weight = sd_eff_weight;
 	}
 
 	/*
-	 *           \Sum shares * rq_weight
-	 * shares =  -----------------------
-	 *               \Sum rq_weight
-	 *
+	 *             \Sum_j shares_j * rq_weight_i
+	 * shares_i =  -----------------------------
+	 *                  \Sum_j rq_weight_j
 	 */
 	shares = (sd_shares * rq_weight) / sd_rq_weight;
 	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
@@ -1593,14 +1596,8 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
 		shares = tg->shares;
 
-	for_each_cpu(i, sched_domain_span(sd)) {
-		unsigned long sd_rq_weight = rq_weight;
-
-		if (!tg->cfs_rq[i]->rq_weight)
-			sd_rq_weight = eff_weight;
-
-		update_group_shares_cpu(tg, i, shares, sd_rq_weight);
-	}
+	for_each_cpu(i, sched_domain_span(sd))
+		update_group_shares_cpu(tg, i, shares, rq_weight, eff_weight);
 
 	return 0;
 }
|
Loading…
x
Reference in New Issue
Block a user