rcu: Remove rsp parameter from rcu_node tree accessor macros
There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's rcu_node tree's accessor macros. This commit therefore removes the rsp parameter from those macros in kernel/rcu/rcu.h, and removes some now-unused rsp local variables while in the area.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
commit aedf4ba984
parent 63d4c8c979
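
The shape of the change, as a minimal self-contained C sketch (all names here — struct state, the_state, first_leaf_old/new — are illustrative stand-ins, not the kernel's definitions): once the state structure is a build-time singleton, an accessor macro can reference the global directly instead of taking a pointer parameter.

    #include <stdio.h>

    struct state {
            int level[4];           /* stand-in for rcu_state's ->level[] array */
    };

    static struct state the_state = { .level = { 10, 20, 30, 40 } };
    static const int num_lvls = 4;

    /* Before: every caller must name the state structure. */
    #define first_leaf_old(sp)      ((sp)->level[num_lvls - 1])

    /* After: the macro references the singleton directly. */
    #define first_leaf_new()        (the_state.level[num_lvls - 1])

    int main(void)
    {
            printf("old: %d\n", first_leaf_old(&the_state));        /* prints 40 */
            printf("new: %d\n", first_leaf_new());                  /* prints 40 */
            return 0;
    }

Every hunk below is an instance of this substitution.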
@@ -329,29 +329,23 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 }
 
 /* Returns first leaf rcu_node of the specified RCU flavor. */
-#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1])
+#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])
 
 /* Is this rcu_node a leaf? */
 #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
 
 /* Is this rcu_node the last leaf? */
-#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])
 
 /*
- * Do a full breadth-first scan of the rcu_node structures for the
+ * Do a full breadth-first scan of the {s,}rcu_node structures for the
  * specified rcu_state structure.
  */
-#define rcu_for_each_node_breadth_first(rsp, rnp) \
-        for ((rnp) = &(rsp)->node[0]; \
-             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
-
-/*
- * Do a breadth-first scan of the non-leaf rcu_node structures for the
- * specified rcu_state structure.  Note that if there is a singleton
- * rcu_node tree with but one rcu_node structure, this loop is a no-op.
- */
-#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
-        for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rnp); (rnp)++)
+#define srcu_for_each_node_breadth_first(sp, rnp) \
+        for ((rnp) = &(sp)->node[0]; \
+             (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_node_breadth_first(rnp) \
+        srcu_for_each_node_breadth_first(&rcu_state, rnp)
 
 /*
  * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
@@ -359,9 +353,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
  * one rcu_node structure, this loop -will- visit the rcu_node structure.
  * It is still a leaf node, even if it is also the root node.
  */
-#define rcu_for_each_leaf_node(rsp, rnp) \
-        for ((rnp) = rcu_first_leaf_node(rsp); \
-             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_leaf_node(rnp) \
+        for ((rnp) = rcu_first_leaf_node(); \
+             (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
 
 /*
  * Iterate over all possible CPUs in a leaf RCU node.
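
The hunk above layers the RCU iterator on top of the SRCU one. A standalone sketch of that composition — the two macro bodies mirror the patch, while struct node, struct mini_state, and the three-node layout are invented for illustration:

    #include <stdio.h>

    struct node { int grplo; };                     /* toy stand-in for struct rcu_node */
    struct mini_state { struct node node[3]; };     /* toy stand-in for struct rcu_state */

    static struct mini_state rcu_state = { .node = { { 0 }, { 8 }, { 16 } } };
    static const int rcu_num_nodes = 3;

    /* Macro bodies as in the patch; surrounding types are invented. */
    #define srcu_for_each_node_breadth_first(sp, rnp) \
            for ((rnp) = &(sp)->node[0]; \
                 (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
    #define rcu_for_each_node_breadth_first(rnp) \
            srcu_for_each_node_breadth_first(&rcu_state, rnp)

    int main(void)
    {
            struct node *rnp;

            /* Callers no longer pass the rcu_state pointer. */
            rcu_for_each_node_breadth_first(rnp)
                    printf("visiting node with grplo %d\n", rnp->grplo);
            return 0;
    }

Because srcu_for_each_node_breadth_first() still takes an explicit pointer, the SRCU code can keep passing its own structure while every RCU caller drops the argument — which is exactly the substitution the remaining hunks perform.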
@@ -105,7 +105,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
         rcu_init_levelspread(levelspread, num_rcu_lvl);
 
         /* Each pass through this loop initializes one srcu_node structure. */
-        rcu_for_each_node_breadth_first(sp, snp) {
+        srcu_for_each_node_breadth_first(sp, snp) {
                 spin_lock_init(&ACCESS_PRIVATE(snp, lock));
                 WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
                              ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -561,7 +561,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 
         /* Initiate callback invocation as needed. */
         idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
-        rcu_for_each_node_breadth_first(sp, snp) {
+        srcu_for_each_node_breadth_first(sp, snp) {
                 spin_lock_irq_rcu_node(snp);
                 cbs = false;
                 last_lvl = snp >= sp->level[rcu_num_lvls - 1];
@@ -573,7 +573,7 @@ void show_rcu_gp_kthreads(void)
         for_each_rcu_flavor(rsp) {
                 pr_info("%s: wait state: %d ->state: %#lx\n",
                         rsp->name, rsp->gp_state, rsp->gp_kthread->state);
-                rcu_for_each_node_breadth_first(rsp, rnp) {
+                rcu_for_each_node_breadth_first(rnp) {
                         if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
                                 continue;
                         pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
@@ -1276,7 +1276,7 @@ static void rcu_dump_cpu_stacks(void)
         unsigned long flags;
         struct rcu_node *rnp;
 
-        rcu_for_each_leaf_node(&rcu_state, rnp) {
+        rcu_for_each_leaf_node(rnp) {
                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
                 for_each_leaf_node_possible_cpu(rnp, cpu)
                         if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
@@ -1336,7 +1336,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
          */
         pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
         print_cpu_stall_info_begin();
-        rcu_for_each_leaf_node(rsp, rnp) {
+        rcu_for_each_leaf_node(rnp) {
                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
                 ndetected += rcu_print_task_stall(rnp);
                 if (rnp->qsmask != 0) {
@@ -1873,7 +1873,7 @@ static bool rcu_gp_init(void)
          * will handle subsequent offline CPUs.
          */
         rsp->gp_state = RCU_GP_ONOFF;
-        rcu_for_each_leaf_node(rsp, rnp) {
+        rcu_for_each_leaf_node(rnp) {
                 spin_lock(&rsp->ofl_lock);
                 raw_spin_lock_irq_rcu_node(rnp);
                 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
@@ -1933,7 +1933,7 @@ static bool rcu_gp_init(void)
          * process finishes, because this kthread handles both.
          */
         rsp->gp_state = RCU_GP_INIT;
-        rcu_for_each_node_breadth_first(rsp, rnp) {
+        rcu_for_each_node_breadth_first(rnp) {
                 rcu_gp_slow(gp_init_delay);
                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
                 rdp = this_cpu_ptr(&rcu_data);
@@ -2046,7 +2046,7 @@ static void rcu_gp_cleanup(void)
          */
         new_gp_seq = rsp->gp_seq;
         rcu_seq_end(&new_gp_seq);
-        rcu_for_each_node_breadth_first(rsp, rnp) {
+        rcu_for_each_node_breadth_first(rnp) {
                 raw_spin_lock_irq_rcu_node(rnp);
                 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
                         dump_blkd_tasks(rnp, 10);
@@ -2606,9 +2606,8 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rsp))
         unsigned long flags;
         unsigned long mask;
         struct rcu_node *rnp;
-        struct rcu_state *rsp = &rcu_state;
 
-        rcu_for_each_leaf_node(rsp, rnp) {
+        rcu_for_each_leaf_node(rnp) {
                 cond_resched_tasks_rcu_qs();
                 mask = 0;
                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -3778,7 +3777,7 @@ static void __init rcu_init_one(void)
 
         init_swait_queue_head(&rsp->gp_wq);
         init_swait_queue_head(&rsp->expedited_wq);
-        rnp = rcu_first_leaf_node(rsp);
+        rnp = rcu_first_leaf_node();
         for_each_possible_cpu(i) {
                 while (i > rnp->grphi)
                         rnp++;
@@ -3878,7 +3877,7 @@ static void __init rcu_dump_rcu_node_tree(void)
 
         pr_info("rcu_node tree layout dump\n");
         pr_info(" ");
-        rcu_for_each_node_breadth_first(&rcu_state, rnp) {
+        rcu_for_each_node_breadth_first(rnp) {
                 if (rnp->level != level) {
                         pr_cont("\n");
                         pr_info(" ");
@@ -97,7 +97,7 @@ static void sync_exp_reset_tree_hotplug(void)
          * Each pass through the following loop propagates newly onlined
          * CPUs for the current rcu_node structure up the rcu_node tree.
          */
-        rcu_for_each_leaf_node(&rcu_state, rnp) {
+        rcu_for_each_leaf_node(rnp) {
                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
                 if (rnp->expmaskinit == rnp->expmaskinitnext) {
                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -141,7 +141,7 @@ static void __maybe_unused sync_exp_reset_tree(void)
         struct rcu_node *rnp;
 
         sync_exp_reset_tree_hotplug();
-        rcu_for_each_node_breadth_first(&rcu_state, rnp) {
+        rcu_for_each_node_breadth_first(rnp) {
                 raw_spin_lock_irqsave_rcu_node(rnp, flags);
                 WARN_ON_ONCE(rnp->expmask);
                 rnp->expmask = rnp->expmaskinit;
@@ -438,14 +438,14 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
         trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
 
         /* Schedule work for each leaf rcu_node structure. */
-        rcu_for_each_leaf_node(&rcu_state, rnp) {
+        rcu_for_each_leaf_node(rnp) {
                 rnp->exp_need_flush = false;
                 if (!READ_ONCE(rnp->expmask))
                         continue; /* Avoid early boot non-existent wq. */
                 rnp->rew.rew_func = func;
                 if (!READ_ONCE(rcu_par_gp_wq) ||
                     rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
-                    rcu_is_last_leaf_node(&rcu_state, rnp)) {
+                    rcu_is_last_leaf_node(rnp)) {
                         /* No workqueues yet or last leaf, do direct call. */
                         sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                         continue;
@@ -462,7 +462,7 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
         }
 
         /* Wait for workqueue jobs (if any) to complete. */
-        rcu_for_each_leaf_node(&rcu_state, rnp)
+        rcu_for_each_leaf_node(rnp)
                 if (rnp->exp_need_flush)
                         flush_work(&rnp->rew.rew_work);
 }
@@ -496,7 +496,7 @@ static void synchronize_sched_expedited_wait(void)
                 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                        rcu_state.name);
                 ndetected = 0;
-                rcu_for_each_leaf_node(&rcu_state, rnp) {
+                rcu_for_each_leaf_node(rnp) {
                         ndetected += rcu_print_task_exp_stall(rnp);
                         for_each_leaf_node_possible_cpu(rnp, cpu) {
                                 struct rcu_data *rdp;
@@ -517,7 +517,7 @@ static void synchronize_sched_expedited_wait(void)
                         rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
                 if (ndetected) {
                         pr_err("blocking rcu_node structures:");
-                        rcu_for_each_node_breadth_first(&rcu_state, rnp) {
+                        rcu_for_each_node_breadth_first(rnp) {
                                 if (rnp == rnp_root)
                                         continue; /* printed unconditionally */
                                 if (sync_rcu_preempt_exp_done_unlocked(rnp))
@@ -529,7 +529,7 @@ static void synchronize_sched_expedited_wait(void)
                         }
                         pr_cont("\n");
                 }
-                rcu_for_each_leaf_node(&rcu_state, rnp) {
+                rcu_for_each_leaf_node(rnp) {
                         for_each_leaf_node_possible_cpu(rnp, cpu) {
                                 mask = leaf_node_cpu_bit(rnp, cpu);
                                 if (!(rnp->expmask & mask))
@@ -561,7 +561,7 @@ static void rcu_exp_wait_wake(unsigned long s)
          */
         mutex_lock(&rcu_state.exp_wake_mutex);
 
-        rcu_for_each_node_breadth_first(&rcu_state, rnp) {
+        rcu_for_each_node_breadth_first(rnp) {
                 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                         spin_lock(&rnp->exp_lock);
                         /* Recheck, avoid hang in case someone just arrived. */
@@ -687,7 +687,7 @@ static void rcu_print_detail_task_stall(void)
         struct rcu_node *rnp = rcu_get_root();
 
         rcu_print_detail_task_stall_rnp(rnp);
-        rcu_for_each_leaf_node(&rcu_state, rnp)
+        rcu_for_each_leaf_node(rnp)
                 rcu_print_detail_task_stall_rnp(rnp);
 }
 
@@ -1427,7 +1427,7 @@ static void __init rcu_spawn_boost_kthreads(void)
         for_each_possible_cpu(cpu)
                 per_cpu(rcu_cpu_has_work, cpu) = 0;
         BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-        rcu_for_each_leaf_node(&rcu_state, rnp)
+        rcu_for_each_leaf_node(rnp)
                 (void)rcu_spawn_one_boost_kthread(rnp);
 }
 