Merge branch 'locking/urgent' into locking/core, to pick up dependent fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event,
 	 * this will always be called from the right CPU.
 	 */
 	cpuctx = __get_cpu_context(ctx);
+
+	/* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
+	if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
+		/*
+		 * We are removing the last cpu event in this context.
+		 * If that event is not active in this cpu, cpuctx->cgrp
+		 * should've been cleared by perf_cgroup_switch.
+		 */
+		WARN_ON_ONCE(!add && cpuctx->cgrp);
+		return;
+	}
 	cpuctx->cgrp = add ? event->cgrp : NULL;
 }
 
@@ -8018,6 +8029,7 @@ restart:
  * if <size> is not specified, the range is treated as a single address.
  */
 enum {
+	IF_ACT_NONE = -1,
 	IF_ACT_FILTER,
 	IF_ACT_START,
 	IF_ACT_STOP,
@@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = {
 	{ IF_SRC_KERNEL,	"%u/%u" },
 	{ IF_SRC_FILEADDR,	"%u@%s" },
 	{ IF_SRC_KERNELADDR,	"%u" },
+	{ IF_ACT_NONE,		NULL },
 };
 
 /*
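The `{ IF_ACT_NONE, NULL }` entry added above terminates the `if_tokens` table: the kernel's `match_token()` walks a `match_table_t` until it reaches an entry whose pattern is NULL and returns that entry's token when nothing matches. Presumably that is the point of the fix, so an unrecognized filter string maps to `IF_ACT_NONE` instead of the scan walking past the end of the table. A minimal userspace analogue of such a sentinel-terminated token table (hypothetical names, not the kernel's `match_token()` API) could look like this:

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical userspace analogue of a sentinel-terminated token table. */
enum { TOK_NONE = -1, TOK_FILTER, TOK_START, TOK_STOP };

struct token_entry {
	int         token;
	const char *pattern;	/* NULL pattern marks the end of the table */
};

static const struct token_entry tokens[] = {
	{ TOK_FILTER, "filter" },
	{ TOK_START,  "start"  },
	{ TOK_STOP,   "stop"   },
	{ TOK_NONE,   NULL     },	/* sentinel: the lookup stops here */
};

/* Walk the table until the NULL-pattern sentinel; unmatched input maps to it. */
static int lookup_token(const char *s)
{
	const struct token_entry *p;

	for (p = tokens; p->pattern; p++)
		if (strcmp(s, p->pattern) == 0)
			return p->token;
	return p->token;	/* p now points at the sentinel: TOK_NONE */
}

int main(void)
{
	printf("%d %d\n", lookup_token("start"), lookup_token("bogus"));	/* 1 -1 */
	return 0;
}
```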
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -836,6 +836,7 @@ void __noreturn do_exit(long code)
 	 */
 	perf_event_exit_task(tsk);
 
+	sched_autogroup_exit_task(tsk);
 	cgroup_exit(tsk);
 
 	/*
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
 
 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 {
-	if (!rt_mutex_has_waiters(lock))
-		clear_rt_mutex_waiters(lock);
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	if (rt_mutex_has_waiters(lock))
+		return;
+
+	/*
+	 * The rbtree has no waiters enqueued, now make sure that the
+	 * lock->owner still has the waiters bit set, otherwise the
+	 * following can happen:
+	 *
+	 * CPU 0        CPU 1           CPU2
+	 * l->owner=T1
+	 *              rt_mutex_lock(l)
+	 *              lock(l->lock)
+	 *              l->owner = T1 | HAS_WAITERS;
+	 *              enqueue(T2)
+	 *              boost()
+	 *                unlock(l->lock)
+	 *              block()
+	 *
+	 *                              rt_mutex_lock(l)
+	 *                              lock(l->lock)
+	 *                              l->owner = T1 | HAS_WAITERS;
+	 *                              enqueue(T3)
+	 *                              boost()
+	 *                                unlock(l->lock)
+	 *                              block()
+	 *      signal(->T2)    signal(->T3)
+	 *      lock(l->lock)
+	 *      dequeue(T2)
+	 *      deboost()
+	 *        unlock(l->lock)
+	 *                      lock(l->lock)
+	 *                      dequeue(T3)
+	 *                       ==> wait list is empty
+	 *                      deboost()
+	 *                       unlock(l->lock)
+	 *      lock(l->lock)
+	 *      fixup_rt_mutex_waiters()
+	 *        if (wait_list_empty(l) {
+	 *          owner = l->owner & ~HAS_WAITERS;
+	 *          l->owner = owner
+	 *           ==> l->owner = T1
+	 *        }
+	 *                      lock(l->lock)
+	 * rt_mutex_unlock(l)   fixup_rt_mutex_waiters()
+	 *                        if (wait_list_empty(l) {
+	 *                          owner = l->owner & ~HAS_WAITERS;
+	 * cmpxchg(l->owner, T1, NULL)
+	 *  ===> Success (l->owner = NULL)
+	 *
+	 *                          l->owner = owner
+	 *                           ==> l->owner = T1
+	 *                        }
+	 *
+	 * With the check for the waiter bit in place T3 on CPU2 will not
+	 * overwrite. All tasks fiddling with the waiters bit are
+	 * serialized by l->lock, so nothing else can modify the waiters
+	 * bit. If the bit is set then nothing can change l->owner either
+	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
+	 * happens in the middle of the RMW because the waiters bit is
+	 * still set.
+	 */
+	owner = READ_ONCE(*p);
+	if (owner & RT_MUTEX_HAS_WAITERS)
+		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
 /*
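The rewritten `fixup_rt_mutex_waiters()` relies on the rt_mutex owner field packing the `RT_MUTEX_HAS_WAITERS` flag into the low bits of the owner task pointer: the waiters bit is cleared with a plain read-modify-write only when it is still set, so a concurrent cmpxchg-based unlock either completes before the RMW or fails because the bit is still set. Below is a minimal userspace sketch of that tagged-owner-word idea using C11 atomics; it is a simplification with made-up names, not the kernel's rt_mutex implementation:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified userspace sketch: an "owner" word whose low bit means
 * "has waiters", mimicking the rt_mutex owner encoding. Not kernel code.
 */
#define HAS_WAITERS 1UL

static _Atomic uintptr_t owner;		/* 0 == unlocked */

static void lock_slowpath_mark_waiters(uintptr_t task)
{
	/* Done under the wait_lock in the real code: set owner | HAS_WAITERS. */
	atomic_store(&owner, task | HAS_WAITERS);
}

/* Clear the waiters bit only if it is still set (the fixed fixup pattern). */
static void fixup_waiters_bit(void)
{
	uintptr_t cur = atomic_load(&owner);

	if (cur & HAS_WAITERS)
		atomic_store(&owner, cur & ~HAS_WAITERS);
}

/* Fast-path unlock: cmpxchg(owner, task, 0) fails while the bit is set. */
static int unlock_fastpath(uintptr_t task)
{
	uintptr_t expected = task;

	return atomic_compare_exchange_strong(&owner, &expected, 0);
}

int main(void)
{
	uintptr_t t1 = 0x1000;		/* stand-in for a task pointer */

	atomic_store(&owner, t1);
	lock_slowpath_mark_waiters(t1);

	/* While HAS_WAITERS is set, the fast-path unlock must fail ... */
	printf("unlock with waiters bit set: %d\n", unlock_fastpath(t1));	/* 0 */

	/* ... and only the serialized fixup clears the bit. */
	fixup_waiters_bit();
	printf("unlock after fixup:         %d\n", unlock_fastpath(t1));	/* 1 */
	return 0;
}
```

In the real code the fixup runs under the lock's wait_lock, which is what serializes everyone who touches the waiters bit; the sketch only illustrates why the unlock-side cmpxchg cannot succeed while the bit is set.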
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
 
 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 {
-	return (struct task_struct *)
-		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+	return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
 }
 
 /*
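The `rt_mutex_owner()` change snapshots `lock->owner` exactly once with `READ_ONCE()` before masking, so the compiler cannot reload the field and combine flag bits and pointer from two different values. A tiny userspace sketch of that read-once-then-derive pattern (a volatile access standing in for `READ_ONCE()`, names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

#define OWNER_MASKALL 3UL	/* low bits used as flags, as in rt_mutex */

/* Stand-in for READ_ONCE(): force a single, non-reloadable read. */
#define READ_ONCE_UL(x) (*(const volatile uintptr_t *)&(x))

static uintptr_t lock_owner;	/* imagined to be updated concurrently */

/*
 * Snapshot the owner word once, then derive the pointer from that local
 * copy, so the flag bits and the pointer come from the same value.
 */
static void *get_owner(void)
{
	uintptr_t owner = READ_ONCE_UL(lock_owner);

	return (void *)(owner & ~OWNER_MASKALL);
}

int main(void)
{
	lock_owner = 0x2000 | 1;		/* pointer value plus a flag bit */
	printf("%p\n", get_owner());		/* prints the masked 0x2000 */
	return 0;
}
```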
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
 	if (tg != &root_task_group)
 		return false;
-
 	/*
-	 * We can only assume the task group can't go away on us if
-	 * autogroup_move_group() can see us on ->thread_group list.
+	 * If we race with autogroup_move_group() the caller can use the old
+	 * value of signal->autogroup but in this case sched_move_task() will
+	 * be called again before autogroup_kref_put().
+	 *
+	 * However, there is no way sched_autogroup_exit_task() could tell us
+	 * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
 	 */
 	if (p->flags & PF_EXITING)
 		return false;
@@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 	return true;
 }
 
+void sched_autogroup_exit_task(struct task_struct *p)
+{
+	/*
+	 * We are going to call exit_notify() and autogroup_move_group() can't
+	 * see this thread after that: we can no longer use signal->autogroup.
+	 * See the PF_EXITING check in task_wants_autogroup().
+	 */
+	sched_move_task(p);
+}
+
 static void
 autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 {
@@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	}
 
 	p->signal->autogroup = autogroup_kref_get(ag);
-
-	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
-		goto out;
-
+	/*
+	 * We can't avoid sched_move_task() after we changed signal->autogroup,
+	 * this process can already run with task_group() == prev->tg or we can
+	 * race with cgroup code which can read autogroup = prev under rq->lock.
+	 * In the latter case for_each_thread() can not miss a migrating thread,
+	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
+	 * can't be removed from thread list, we hold ->siglock.
+	 *
+	 * If an exiting thread was already removed from thread list we rely on
+	 * sched_autogroup_exit_task().
+	 */
 	for_each_thread(p, t)
 		sched_move_task(t);
-out:
+
 	unlock_task_sighand(p, &flags);
 	autogroup_kref_put(prev);
 }