sched/fair: Add tmp_alone_branch assertion
commit 5d299eabea5a251fbf66e8277704b874bbba92dc upstream. The magic in list_add_leaf_cfs_rq() requires that at the end of enqueue_task_fair(): rq->tmp_alone_branch == &rq->leaf_cfs_rq_list If this is violated, list integrity is compromised for list entries and the tmp_alone_branch pointer might dangle. Also, reflow list_add_leaf_cfs_rq() while there. This loses one indentation level and generates a form that's convenient for the next patch. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@kernel.org> Cc: Janne Huttunen <janne.huttunen@nokia.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
f0a31232b3
commit
6c11530ea4
@ -284,64 +284,69 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
|
||||
|
||||
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
|
||||
{
|
||||
if (!cfs_rq->on_list) {
|
||||
struct rq *rq = rq_of(cfs_rq);
|
||||
int cpu = cpu_of(rq);
|
||||
/*
|
||||
* Ensure we either appear before our parent (if already
|
||||
* enqueued) or force our parent to appear after us when it is
|
||||
* enqueued. The fact that we always enqueue bottom-up
|
||||
* reduces this to two cases and a special case for the root
|
||||
* cfs_rq. Furthermore, it also means that we will always reset
|
||||
* tmp_alone_branch either when the branch is connected
|
||||
* to a tree or when we reach the beg of the tree
|
||||
*/
|
||||
if (cfs_rq->tg->parent &&
|
||||
cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
|
||||
/*
|
||||
* If parent is already on the list, we add the child
|
||||
* just before. Thanks to circular linked property of
|
||||
* the list, this means to put the child at the tail
|
||||
* of the list that starts by parent.
|
||||
*/
|
||||
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
|
||||
&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
|
||||
/*
|
||||
* The branch is now connected to its tree so we can
|
||||
* reset tmp_alone_branch to the beginning of the
|
||||
* list.
|
||||
*/
|
||||
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
|
||||
} else if (!cfs_rq->tg->parent) {
|
||||
/*
|
||||
* cfs rq without parent should be put
|
||||
* at the tail of the list.
|
||||
*/
|
||||
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
|
||||
&rq->leaf_cfs_rq_list);
|
||||
/*
|
||||
* We have reach the beg of a tree so we can reset
|
||||
* tmp_alone_branch to the beginning of the list.
|
||||
*/
|
||||
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
|
||||
} else {
|
||||
/*
|
||||
* The parent has not already been added so we want to
|
||||
* make sure that it will be put after us.
|
||||
* tmp_alone_branch points to the beg of the branch
|
||||
* where we will add parent.
|
||||
*/
|
||||
list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
|
||||
rq->tmp_alone_branch);
|
||||
/*
|
||||
* update tmp_alone_branch to points to the new beg
|
||||
* of the branch
|
||||
*/
|
||||
rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
|
||||
}
|
||||
struct rq *rq = rq_of(cfs_rq);
|
||||
int cpu = cpu_of(rq);
|
||||
|
||||
cfs_rq->on_list = 1;
|
||||
if (cfs_rq->on_list)
|
||||
return;
|
||||
|
||||
cfs_rq->on_list = 1;
|
||||
|
||||
/*
|
||||
* Ensure we either appear before our parent (if already
|
||||
* enqueued) or force our parent to appear after us when it is
|
||||
* enqueued. The fact that we always enqueue bottom-up
|
||||
* reduces this to two cases and a special case for the root
|
||||
* cfs_rq. Furthermore, it also means that we will always reset
|
||||
* tmp_alone_branch either when the branch is connected
|
||||
* to a tree or when we reach the top of the tree
|
||||
*/
|
||||
if (cfs_rq->tg->parent &&
|
||||
cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
|
||||
/*
|
||||
* If parent is already on the list, we add the child
|
||||
* just before. Thanks to circular linked property of
|
||||
* the list, this means to put the child at the tail
|
||||
* of the list that starts by parent.
|
||||
*/
|
||||
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
|
||||
&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
|
||||
/*
|
||||
* The branch is now connected to its tree so we can
|
||||
* reset tmp_alone_branch to the beginning of the
|
||||
* list.
|
||||
*/
|
||||
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!cfs_rq->tg->parent) {
|
||||
/*
|
||||
* cfs rq without parent should be put
|
||||
* at the tail of the list.
|
||||
*/
|
||||
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
|
||||
&rq->leaf_cfs_rq_list);
|
||||
/*
|
||||
* We have reach the top of a tree so we can reset
|
||||
* tmp_alone_branch to the beginning of the list.
|
||||
*/
|
||||
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The parent has not already been added so we want to
|
||||
* make sure that it will be put after us.
|
||||
* tmp_alone_branch points to the begin of the branch
|
||||
* where we will add parent.
|
||||
*/
|
||||
list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
|
||||
/*
|
||||
* update tmp_alone_branch to points to the new begin
|
||||
* of the branch
|
||||
*/
|
||||
rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
|
||||
}
|
||||
|
||||
static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
|
||||
@ -352,7 +357,12 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
|
||||
}
|
||||
}
|
||||
|
||||
/* Iterate through all leaf cfs_rq's on a runqueue: */
|
||||
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
|
||||
{
|
||||
SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
|
||||
}
|
||||
|
||||
/* Iterate through all cfs_rq's on a runqueue in bottom-up order */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
|
||||
|
||||
@ -446,6 +456,10 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
|
||||
{
|
||||
}
|
||||
|
||||
/*
 * No leaf cfs_rq list to maintain here (presumably the
 * !CONFIG_FAIR_GROUP_SCHED branch — confirm against surrounding #ifdefs),
 * so there is no tmp_alone_branch invariant to check.
 */
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
}
|
||||
|
||||
/* Without task groups the only cfs_rq is rq->cfs: a one-element "list". */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
|
||||
|
||||
@ -5160,6 +5174,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
|
||||
if (!se)
|
||||
add_nr_running(rq, 1);
|
||||
|
||||
assert_list_leaf_cfs_rq(rq);
|
||||
|
||||
hrtick_update(rq);
|
||||
}
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user