tasks: Add a count of task RCU users
Add a count of the number of RCU users (currently 1) of the task
struct so that we can later add the scheduler case and get rid of the
very subtle task_rcu_dereference(), and just use rcu_dereference().

As suggested by Oleg have the count overlap rcu_head so that no
additional space in task_struct is required.

Inspired-by: Linus Torvalds <torvalds@linux-foundation.org>
Inspired-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/87woebdplt.fsf_-_@x220.int.ebiederm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
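For readers unfamiliar with the overlap trick, here is a minimal hedged sketch of the pattern (hypothetical struct foo and helpers, not code from this commit): the refcount and the rcu_head can share storage because the rcu_head is only written once the last reference has been dropped, at which point nothing reads the count anymore.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object using the same overlap trick as task_struct. */
struct foo {
	int data;
	union {
		refcount_t	users;	/* valid while references are held */
		struct rcu_head	rcu;	/* only touched after users hits zero */
	};
};

static void foo_free_rcu(struct rcu_head *rhp)
{
	/* Runs after a grace period, when no RCU reader can still see the object. */
	kfree(container_of(rhp, struct foo, rcu));
}

static void put_foo(struct foo *f)
{
	/*
	 * The last put is the only place the rcu_head is initialized, so
	 * overlapping it with the refcount is safe.
	 */
	if (refcount_dec_and_test(&f->users))
		call_rcu(&f->rcu, foo_free_rcu);
}

Since a union is only as large as its largest member and struct rcu_head (two pointers) is larger than refcount_t, the counter adds no space to the containing structure, which is the point of Oleg's suggestion.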
commit 3fbd7ee285
parent 42fd8baab3
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1147,7 +1147,10 @@ struct task_struct {
 
 	struct tlbflush_unmap_batch	tlb_ubc;
 
-	struct rcu_head			rcu;
+	union {
+		refcount_t		rcu_users;
+		struct rcu_head		rcu;
+	};
 
 	/* Cache last used pipe for splice(): */
 	struct pipe_inode_info		*splice_pipe;
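A hedged compile-time illustration of the "no additional space" claim (this assertion is not part of the commit): the union is sized by its largest member, and refcount_t is no larger than struct rcu_head, so the new counter is free.

#include <linux/build_bug.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>

/* Illustrative check only: refcount_t (an atomic_t) fits inside rcu_head. */
static_assert(sizeof(refcount_t) <= sizeof(struct rcu_head),
	      "rcu_users overlaps rcu_head without growing task_struct");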
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -120,6 +120,7 @@ static inline void put_task_struct(struct task_struct *t)
 }
 
 struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+void put_task_struct_rcu_user(struct task_struct *task);
 
 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
 extern int arch_task_struct_size __read_mostly;
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -182,6 +182,11 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 	put_task_struct(tsk);
 }
 
+void put_task_struct_rcu_user(struct task_struct *task)
+{
+	if (refcount_dec_and_test(&task->rcu_users))
+		call_rcu(&task->rcu, delayed_put_task_struct);
+}
 
 void release_task(struct task_struct *p)
 {
@@ -222,7 +227,7 @@ repeat:
 
 	write_unlock_irq(&tasklist_lock);
 	release_thread(p);
-	call_rcu(&p->rcu, delayed_put_task_struct);
+	put_task_struct_rcu_user(p);
 
 	p = leader;
 	if (unlikely(zap_leader))
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -902,10 +902,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (orig->cpus_ptr == &orig->cpus_mask)
 		tsk->cpus_ptr = &tsk->cpus_mask;
 
-	/*
-	 * One for us, one for whoever does the "release_task()" (usually
-	 * parent)
-	 */
+	/* One for the user space visible state that goes away when reaped. */
+	refcount_set(&tsk->rcu_users, 1);
+	/* One for the rcu users, and one for the scheduler */
 	refcount_set(&tsk->usage, 2);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	tsk->btrace_seq = 0;
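To show where this is heading, the changelog's end goal is that plain rcu_dereference() becomes sufficient once every holder of an rcu_users reference drops it only through put_task_struct_rcu_user(): the task_struct then stays valid for a full grace period after the last drop. Below is a hedged sketch with a hypothetical RCU-protected task pointer; watched_task and both helpers are invented for illustration and assume the watcher has been granted an rcu_users reference, something this commit only prepares for.

#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

/* Hypothetical RCU-protected pointer to a task whose rcu_users ref we hold. */
static struct task_struct __rcu *watched_task;

static pid_t read_watched_pid(void)
{
	struct task_struct *p;
	pid_t pid = -1;

	rcu_read_lock();
	p = rcu_dereference(watched_task);
	if (p)
		pid = p->pid;	/* safe: the task cannot be freed before rcu_read_unlock() */
	rcu_read_unlock();

	return pid;
}

static void unwatch_task(void)
{
	struct task_struct *p = rcu_dereference_protected(watched_task, true);

	RCU_INIT_POINTER(watched_task, NULL);
	if (p)
		put_task_struct_rcu_user(p);	/* last drop queues delayed_put_task_struct() */
}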