rcu: Reduce synchronize_rcu_expedited() latency
The synchronize_rcu_expedited() function disables interrupts across a scan of all leaf rcu_node structures, which is not good for real-time scheduling latency on large systems (hundreds or especially thousands of CPUs). This commit therefore holds off CPU-hotplug operations using get_online_cpus(), and removes the prior acquisition of the ->onofflock (which required disabling interrupts).

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
commit 1943c89de7
parent bcfa57ce10
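To make the shape of the change concrete, here is a minimal before/after sketch distilled from the diff below. The wrapper functions and their names are hypothetical; raw_spin_lock_irqsave(), get_online_cpus(), and the rcu_node fields are the kernel interfaces the patch itself touches.

/*
 * Hypothetical sketch, not the kernel source: the old approach kept
 * interrupts disabled across the entire scan of the rcu_node tree.
 */
static void expedited_init_old(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	raw_spin_lock_irqsave(&rsp->onofflock, flags);	/* irqs off here... */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock);		/* irqs already disabled */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock);
	}
	raw_spin_unlock_irqrestore(&rsp->onofflock, flags); /* ...until here */
}

/*
 * Hypothetical sketch of the new approach: get_online_cpus() blocks
 * CPU hotplug (and may sleep), so interrupts need be disabled only
 * across each short per-node critical section.
 */
static void expedited_init_new(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	get_online_cpus();	/* hold off CPU hotplug, irqs stay on */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
	put_online_cpus();
}

The point for real-time latency is that the interrupts-off window shrinks from the whole tree scan to one node's update at a time, while get_online_cpus() still keeps CPU-hotplug operations from racing with the scan.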
@@ -799,34 +799,48 @@ void synchronize_rcu_expedited(void)
 	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
 	smp_mb(); /* Above access cannot bleed into critical section. */
 
+	/*
+	 * Block CPU-hotplug operations. This means that any CPU-hotplug
+	 * operation that finds an rcu_node structure with tasks in the
+	 * process of being boosted will know that all tasks blocking
+	 * this expedited grace period will already be in the process of
+	 * being boosted. This simplifies the process of moving tasks
+	 * from leaf to root rcu_node structures.
+	 */
+	get_online_cpus();
+
 	/*
 	 * Acquire lock, falling back to synchronize_rcu() if too many
 	 * lock-acquisition failures. Of course, if someone does the
 	 * expedited grace period for us, just leave.
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
+		if (ULONG_CMP_LT(snap,
+		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+			put_online_cpus();
+			goto mb_ret; /* Others did our work for us. */
+		}
 		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
 		} else {
+			put_online_cpus();
 			synchronize_rcu();
 			return;
 		}
-		if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
-			goto mb_ret; /* Others did our work for us. */
 	}
-	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
+	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+		put_online_cpus();
 		goto unlock_mb_ret; /* Others did our work for us. */
+	}
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
 	synchronize_sched_expedited();
 
-	raw_spin_lock_irqsave(&rsp->onofflock, flags);
-
 	/* Initialize ->expmask for all non-leaf rcu_node structures. */
 	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
-		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+		raw_spin_lock_irqsave(&rnp->lock, flags);
 		rnp->expmask = rnp->qsmaskinit;
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 
 	/* Snapshot current state of ->blkd_tasks lists. */
@@ -835,7 +849,7 @@ void synchronize_rcu_expedited(void)
 	if (NUM_RCU_NODES > 1)
 		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
 
-	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+	put_online_cpus();
 
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
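The while loop in the first hunk follows a common shape: retry a trylock a bounded number of times with growing backoff, then fall back to a slower but uncontended path. A rough userspace analogue of that shape, with all names illustrative rather than kernel APIs:

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER;

static void do_expedited(void) { /* hypothetical fast path */ }
static void do_slow_path(void) { /* hypothetical fallback, e.g. a plain grace period */ }

void grace_period(void)
{
	int trycount = 0;

	while (pthread_mutex_trylock(&exp_mutex) != 0) {
		if (trycount++ < 10) {
			usleep(trycount * 10);	/* back off a little longer each retry */
		} else {
			do_slow_path();		/* lock stayed contended: give up on the fast path */
			return;
		}
	}
	do_expedited();				/* fast path runs with the mutex held */
	pthread_mutex_unlock(&exp_mutex);
}

The kernel version additionally snapshots sync_rcu_preempt_exp_count before the loop so that, if some other caller's expedited grace period completes in the meantime, it can return early ("Others did our work for us").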