Merge branch 'linus' into sched/core, to resolve semantic conflict
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -409,7 +409,7 @@ bool rcu_eqs_special_set(int cpu)
  *
  * The caller must have disabled interrupts and must not be idle.
  */
-void rcu_momentary_dyntick_idle(void)
+notrace void rcu_momentary_dyntick_idle(void)
 {
 	int special;
 
@@ -4076,7 +4076,6 @@ void rcu_cpu_starting(unsigned int cpu)
 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * The outgoing function has no further need of RCU, so remove it from
  * the rcu_node tree's ->qsmaskinitnext bit masks.
@@ -4116,6 +4115,7 @@ void rcu_report_dead(unsigned int cpu)
 	rdp->cpu_started = false;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 /*
  * The outgoing CPU has just passed through the dying-idle state, and we
  * are being invoked from the CPU that was IPIed to continue the offline
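The tree_stall.h hunks below restructure rcu_print_task_stall() so it no longer prints while holding rnp->lock: it pins up to eight blocked tasks with get_task_struct() under the lock, drops the lock, and only then prints and releases the references. A minimal userspace analogue of that collect-under-lock, report-after-unlock pattern, assuming pthread primitives in place of the rcu_node lock and made-up task/task_get()/task_put() helpers (none of these are kernel APIs):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct task {
	int pid;
	atomic_int refcnt;
	struct task *next;		/* list protected by list_lock */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct task *blkd_tasks;		/* head of the blocked-task list */

static void task_get(struct task *t) { atomic_fetch_add(&t->refcnt, 1); }
static void task_put(struct task *t) { atomic_fetch_sub(&t->refcnt, 1); }

/* Snapshot up to 8 tasks under the lock, then print them lock-free. */
static int report_stalled_tasks(void)
{
	struct task *ts[8];
	int i = 0, n;

	pthread_mutex_lock(&list_lock);
	for (struct task *t = blkd_tasks; t && i < 8; t = t->next) {
		task_get(t);		/* pin t so it outlives the unlock */
		ts[i++] = t;
	}
	pthread_mutex_unlock(&list_lock);	/* slow printing happens unlocked */

	n = i;
	while (i-- > 0) {
		printf(" P%d", ts[i]->pid);	/* safe: we still hold a reference */
		task_put(ts[i]);
	}
	if (n)
		printf("\n");
	return n;
}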
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -249,13 +249,16 @@ static bool check_slow_task(struct task_struct *t, void *arg)
 
 /*
  * Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each.
+ * sections, printing out the tid of each of the first few of them.
  */
-static int rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
+	__releases(rnp->lock)
 {
+	int i = 0;
 	int ndetected = 0;
 	struct rcu_stall_chk_rdr rscr;
 	struct task_struct *t;
+	struct task_struct *ts[8];
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp))
 		return 0;
@@ -264,6 +267,14 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+		get_task_struct(t);
+		ts[i++] = t;
+		if (i >= ARRAY_SIZE(ts))
+			break;
+	}
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	for (i--; i; i--) {
+		t = ts[i];
 		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
 			pr_cont(" P%d", t->pid);
 		else
@@ -273,6 +284,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 			".q"[rscr.rs.b.need_qs],
 			".e"[rscr.rs.b.exp_hint],
 			".l"[rscr.on_blkd_list]);
+		put_task_struct(t);
 		ndetected++;
 	}
 	pr_cont("\n");
@@ -293,8 +305,9 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static int rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 {
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	return 0;
 }
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -472,7 +485,6 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
 	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		ndetected += rcu_print_task_stall(rnp);
 		if (rnp->qsmask != 0) {
 			for_each_leaf_node_possible_cpu(rnp, cpu)
 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
@@ -480,7 +492,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 					ndetected++;
 				}
 		}
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
 	}
 
 	for_each_possible_cpu(cpu)
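Pieced together from the tree_stall.h hunks above, the preemptible-RCU variant of rcu_print_task_stall() after this merge reads roughly as follows. This is a sketch, not verbatim kernel source: lines the diff never shows are elided or marked as assumptions, and only the identifiers that appear in the hunks are taken as given.

static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	/* ... header printing elided by the diff ... */
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);		/* pin each task so it survives the unlock */
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))	/* report at most 8 tasks */
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);	/* caller's lock */
	for (i--; i; i--) {
		t = ts[i];
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d:%c%c%c", t->pid,	/* leading fields elided */
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		put_task_struct(t);		/* drop the reference taken above */
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;	/* assumed; the diff does not show the return */
}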