Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu

Pull RCU updates from Paul E. McKenney:

 - Removal of spin_unlock_wait()
 - SRCU updates
 - Torture-test updates
 - Documentation updates
 - Miscellaneous fixes
 - CPU-hotplug fixes
 - Miscellaneous non-RCU fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by Ingo Molnar, 2017-08-21 09:45:19 +02:00
84 changed files with 1212 additions and 1317 deletions

diff --git a/kernel/sched/core.c b/kernel/sched/core.c

@@ -951,8 +951,13 @@ struct migration_arg {
 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 				 struct task_struct *p, int dest_cpu)
 {
-	if (unlikely(!cpu_active(dest_cpu)))
-		return rq;
+	if (p->flags & PF_KTHREAD) {
+		if (unlikely(!cpu_online(dest_cpu)))
+			return rq;
+	} else {
+		if (unlikely(!cpu_active(dest_cpu)))
+			return rq;
+	}
 
 	/* Affinity changed (again). */
 	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
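The hunk above lets per-CPU kthreads be migrated onto a CPU that is already online but not yet active during hotplug bring-up, while ordinary tasks remain confined to fully active CPUs. A minimal user-space sketch of that dispatch logic, with cpu_online()/cpu_active() replaced by hypothetical mocks and may_migrate() as an illustrative stand-in for the check in __migrate_task():

#include <stdbool.h>
#include <stdio.h>

#define PF_KTHREAD 0x00200000	/* same flag value the kernel uses */

struct task { unsigned int flags; };

/* Mocked hotplug state: CPU 2 is online but not yet active. */
static bool cpu_online(int cpu) { return cpu <= 2; }
static bool cpu_active(int cpu) { return cpu <= 1; }

/* Mirrors the new __migrate_task() check: kthreads only need the
 * destination CPU online; everything else must wait until active. */
static bool may_migrate(struct task *p, int dest_cpu)
{
	if (p->flags & PF_KTHREAD)
		return cpu_online(dest_cpu);
	return cpu_active(dest_cpu);
}

int main(void)
{
	struct task kthread = { .flags = PF_KTHREAD };
	struct task user = { .flags = 0 };

	printf("kthread -> cpu2: %d\n", may_migrate(&kthread, 2)); /* 1 */
	printf("user    -> cpu2: %d\n", may_migrate(&user, 2));    /* 0 */
	return 0;
}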
@@ -2635,6 +2640,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	prev_state = prev->state;
 	vtime_task_switch(prev);
 	perf_event_task_sched_in(prev, current);
+	/*
+	 * The membarrier system call requires a full memory barrier
+	 * after storing to rq->curr, before going back to user-space.
+	 *
+	 * TODO: This smp_mb__after_unlock_lock can go away if PPC end
+	 * up adding a full barrier to switch_mm(), or we should figure
+	 * out if a smp_mb__after_unlock_lock is really the proper API
+	 * to use.
+	 */
+	smp_mb__after_unlock_lock();
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();
 
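The smp_mb__after_unlock_lock() added here serves sys_membarrier(): the system call can only count each context switch as a full memory barrier if the scheduler actually issues one after storing to rq->curr. For orientation, a hedged user-space sketch of the membarrier(2) call this pairing supports (assumes Linux 4.3+ with CONFIG_MEMBARRIER; there is no glibc wrapper, so it goes through syscall(2)):

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

/* Thin wrapper: glibc provides no membarrier() stub. */
static int membarrier_syscall(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* Ask the kernel which membarrier commands are supported. */
	int mask = membarrier_syscall(MEMBARRIER_CMD_QUERY, 0);
	if (mask < 0) {
		perror("membarrier");
		return 1;
	}
	if (mask & MEMBARRIER_CMD_SHARED) {
		/* Acts as a full memory barrier on all running threads. */
		membarrier_syscall(MEMBARRIER_CMD_SHARED, 0);
		puts("global memory barrier issued");
	}
	return 0;
}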
@@ -3324,6 +3339,21 @@ static void __sched notrace __schedule(bool preempt)
 	if (likely(prev != next)) {
 		rq->nr_switches++;
 		rq->curr = next;
+		/*
+		 * The membarrier system call requires each architecture
+		 * to have a full memory barrier after updating
+		 * rq->curr, before returning to user-space. For TSO
+		 * (e.g. x86), the architecture must provide its own
+		 * barrier in switch_mm(). For weakly ordered machines
+		 * for which spin_unlock() acts as a full memory
+		 * barrier, finish_lock_switch() in common code takes
+		 * care of this barrier. For weakly ordered machines for
+		 * which spin_unlock() acts as a RELEASE barrier (only
+		 * arm64 and PowerPC), arm64 has a full barrier in
+		 * switch_to(), and PowerPC has
+		 * smp_mb__after_unlock_lock() before
+		 * finish_lock_switch().
+		 */
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
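The comment's RELEASE-versus-full-barrier distinction is exactly the classic store-buffering pattern: with only release/acquire ordering, each CPU's store may still sit in a store buffer when the other CPU loads, so both loads can observe zero. A C11 sketch of that litmus test, with seq_cst fences standing in for the full barrier the scheduler must guarantee between the rq->curr store and the return to user space:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

atomic_int x, y;
int r0, r1;

static void *t0(void *arg)
{
	(void)arg;
	atomic_store_explicit(&x, 1, memory_order_release);
	/* Without this fence, the load below may be satisfied before
	 * the store above becomes visible to the other thread. */
	atomic_thread_fence(memory_order_seq_cst);
	r0 = atomic_load_explicit(&y, memory_order_acquire);
	return NULL;
}

static void *t1(void *arg)
{
	(void)arg;
	atomic_store_explicit(&y, 1, memory_order_release);
	atomic_thread_fence(memory_order_seq_cst);
	r1 = atomic_load_explicit(&x, memory_order_acquire);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, t0, NULL);
	pthread_create(&b, NULL, t1, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With both fences, r0 == 0 && r1 == 0 is forbidden;
	 * with release/acquire alone it would be allowed. */
	printf("r0=%d r1=%d\n", r0, r1);
	return 0;
}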
@@ -3352,8 +3382,8 @@ void __noreturn do_task_dead(void)
 	 * To avoid it, we have to wait for releasing tsk->pi_lock which
 	 * is held by try_to_wake_up()
 	 */
-	smp_mb();
-	raw_spin_unlock_wait(&current->pi_lock);
+	raw_spin_lock_irq(&current->pi_lock);
+	raw_spin_unlock_irq(&current->pi_lock);
 
 	/* Causes final put_task_struct in finish_task_switch(): */
 	__set_current_state(TASK_DEAD);
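This hunk shows the conversion pattern behind the tree-wide spin_unlock_wait() removal: rather than a dedicated wait-until-unlocked primitive with subtle ordering semantics, the caller simply acquires and immediately releases the lock, getting the same "any prior critical section has completed" guarantee from ordinary acquire/release ordering. A user-space analogue using pthreads (wait_for_unlock() is an illustrative name, not a kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the removed raw_spin_unlock_wait(): acquiring and
 * immediately releasing the lock returns only once any concurrent
 * critical section has finished, and orders this thread after it. */
static void wait_for_unlock(void)
{
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	wait_for_unlock();	/* no holder here, so returns at once */
	puts("past any prior critical section");
	return 0;
}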