sched: Use lockdep-based checking on rcu_dereference()
Update the rcu_dereference() usages to take advantage of the new
lockdep-based checking.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-6-git-send-email-paulmck@linux.vnet.ibm.com>
[ -v2: fix allmodconfig missing symbol export build failure on x86 ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit d11c563dd2 (parent a898def29e)
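For orientation before the hunks below, here is a minimal sketch (not part of the patch) of the pattern the series applies: rcu_dereference_check(p, c) behaves like rcu_dereference(p), but under CONFIG_PROVE_RCU it complains unless condition c holds, so each call site spells out which locks or read-side critical sections legitimize the lock-free dereference. The names struct foo, global_foo, foo_lock, read_foo_val() and update_foo() are hypothetical.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct foo {
        int val;
};

static struct foo *global_foo;          /* RCU-protected pointer */
static DEFINE_SPINLOCK(foo_lock);       /* update-side lock */

/* Caller must be in an RCU read-side critical section *or* hold foo_lock. */
static int read_foo_val(void)
{
        struct foo *p;

        p = rcu_dereference_check(global_foo,
                                  rcu_read_lock_held() ||
                                  lockdep_is_held(&foo_lock));
        return p ? p->val : -1;
}

/* Updater: publishes a new version while holding foo_lock. */
static void update_foo(struct foo *newp)
{
        struct foo *oldp;

        spin_lock(&foo_lock);
        oldp = global_foo;
        rcu_assign_pointer(global_foo, newp);
        spin_unlock(&foo_lock);

        if (oldp) {
                synchronize_rcu();      /* wait for pre-existing readers */
                kfree(oldp);
        }
}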
@@ -28,6 +28,7 @@ struct css_id;
 extern int cgroup_init_early(void);
 extern int cgroup_init(void);
 extern void cgroup_lock(void);
+extern int cgroup_lock_is_held(void);
 extern bool cgroup_lock_live_group(struct cgroup *cgrp);
 extern void cgroup_unlock(void);
 extern void cgroup_fork(struct task_struct *p);

@@ -486,7 +487,9 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
 static inline struct cgroup_subsys_state *task_subsys_state(
        struct task_struct *task, int subsys_id)
 {
-       return rcu_dereference(task->cgroups->subsys[subsys_id]);
+       return rcu_dereference_check(task->cgroups->subsys[subsys_id],
+                                    rcu_read_lock_held() ||
+                                    cgroup_lock_is_held());
 }

 static inline struct cgroup* task_cgroup(struct task_struct *task,

@@ -280,7 +280,7 @@ static inline void put_cred(const struct cred *_cred)
  * task or by holding tasklist_lock to prevent it from being unlinked.
  */
 #define __task_cred(task) \
-       ((const struct cred *)(rcu_dereference((task)->real_cred)))
+       ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))

 /**
  * get_task_cred - Get another task's objective credentials
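A hedged usage sketch for the checked __task_cred() above: the caller must now be inside rcu_read_lock() or hold tasklist_lock, matching the condition in the macro. The helper name my_task_uid() is hypothetical.

#include <linux/cred.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical example: read another task's objective uid. */
static uid_t my_task_uid(struct task_struct *task)
{
        uid_t uid;

        rcu_read_lock();
        uid = __task_cred(task)->uid;
        rcu_read_unlock();

        return uid;
}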

@@ -416,7 +416,9 @@ static noinline void __init_refok rest_init(void)
        kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
        numa_default_policy();
        pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+       rcu_read_lock();
        kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
+       rcu_read_unlock();
        unlock_kernel();

        /*

@@ -166,6 +166,20 @@ static DEFINE_SPINLOCK(hierarchy_id_lock);
  */
 static int need_forkexit_callback __read_mostly;

+#ifdef CONFIG_PROVE_LOCKING
+int cgroup_lock_is_held(void)
+{
+       return lockdep_is_held(&cgroup_mutex);
+}
+#else /* #ifdef CONFIG_PROVE_LOCKING */
+int cgroup_lock_is_held(void)
+{
+       return mutex_is_locked(&cgroup_mutex);
+}
+#endif /* #else #ifdef CONFIG_PROVE_LOCKING */
+
+EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
+
 /* convenient tests for these bits */
 inline int cgroup_is_removed(const struct cgroup *cgrp)
 {
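A short aside on the two definitions above: with CONFIG_PROVE_LOCKING, lockdep_is_held() verifies that the current task actually holds cgroup_mutex, while the fallback mutex_is_locked() can only confirm that somebody holds it, a weaker but still useful check. Either way, code outside cgroup.c can now assert its locking assumption. The helper below is an illustrative, hypothetical caller, not part of the patch.

#include <linux/cgroup.h>
#include <linux/kernel.h>

/* Hypothetical helper documented to require cgroup_mutex. */
static void my_cgroup_walk(struct cgroup *cgrp)
{
        WARN_ON_ONCE(!cgroup_lock_is_held());
        /* ... traverse cgrp->children knowing the hierarchy is stable ... */
}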

@@ -85,7 +85,9 @@ static void __exit_signal(struct task_struct *tsk)
        BUG_ON(!sig);
        BUG_ON(!atomic_read(&sig->count));

-       sighand = rcu_dereference(tsk->sighand);
+       sighand = rcu_dereference_check(tsk->sighand,
+                                       rcu_read_lock_held() ||
+                                       lockdep_is_held(&tasklist_lock));
        spin_lock(&sighand->siglock);

        posix_cpu_timers_exit(tsk);

@@ -170,8 +172,10 @@ void release_task(struct task_struct * p)
 repeat:
        tracehook_prepare_release_task(p);
        /* don't need to get the RCU readlock here - the process is dead and
-        * can't be modifying its own credentials */
+        * can't be modifying its own credentials. But shut RCU-lockdep up */
+       rcu_read_lock();
        atomic_dec(&__task_cred(p)->user->processes);
+       rcu_read_unlock();

        proc_flush_task(p);


@@ -473,9 +477,11 @@ static void close_files(struct files_struct * files)
                /*
                 * It is safe to dereference the fd table without RCU or
                 * ->file_lock because this is the last reference to the
-                * files structure.
+                * files structure. But use RCU to shut RCU-lockdep up.
                 */
+               rcu_read_lock();
                fdt = files_fdtable(files);
+               rcu_read_unlock();
                for (;;) {
                        unsigned long set;
                        i = j * __NFDBITS;
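The rcu_read_lock()/rcu_read_unlock() pairs added here, in rest_init() and in release_task() do not change any locking rule: those accesses were already safe (the task is dead, or this is the last reference to the structure). They exist only so the lockdep condition inside the now-checked accessors evaluates true. A rough sketch of the idiom, with a hypothetical object and accessor:

#include <linux/rcupdate.h>

struct my_data { int x; };

struct my_obj {
        struct my_data *ptr;    /* normally RCU-protected */
};

/*
 * Sketch only: obj holds the last reference during teardown, so
 * obj->ptr cannot change underneath us, but the RCU read lock is taken
 * anyway so the checked dereference does not trigger an RCU-lockdep splat.
 */
static int my_teardown_read(struct my_obj *obj)
{
        struct my_data *d;
        int x;

        rcu_read_lock();
        d = rcu_dereference(obj->ptr);
        x = d ? d->x : 0;
        rcu_read_unlock();

        return x;
}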

@@ -521,10 +527,12 @@ void put_files_struct(struct files_struct *files)
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
+               rcu_read_lock();
                fdt = files_fdtable(files);
                if (fdt != &files->fdtab)
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
+               rcu_read_unlock();
        }
 }


@@ -86,6 +86,7 @@ int max_threads; /* tunable limit on nr_threads */
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;

 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);

 int nr_processes(void)
 {

@@ -78,10 +78,10 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
        int ret = NOTIFY_DONE;
        struct notifier_block *nb, *next_nb;

-       nb = rcu_dereference(*nl);
+       nb = rcu_dereference_raw(*nl);

        while (nb && nr_to_call) {
-               next_nb = rcu_dereference(nb->next);
+               next_nb = rcu_dereference_raw(nb->next);

 #ifdef CONFIG_DEBUG_NOTIFIERS
                if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
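rcu_dereference_raw() is the deliberately unchecked variant: it keeps the compiler and memory-ordering protections of rcu_dereference() but asserts nothing to lockdep. It fits notifier chains because, depending on the chain type, traversal may be protected by an rwsem, a spinlock, RCU, or SRCU, which is awkward to express as a single condition. A rough, hypothetical walker using it:

#include <linux/notifier.h>
#include <linux/rcupdate.h>

/*
 * Hypothetical helper: count the entries on a notifier chain.  The caller
 * guarantees protection by whatever discipline the chain type uses, so the
 * raw variant is used and no lockdep condition is asserted.
 */
static int count_notifier_chain(struct notifier_block **nl)
{
        struct notifier_block *nb = rcu_dereference_raw(*nl);
        int n = 0;

        while (nb) {
                n++;
                nb = rcu_dereference_raw(nb->next);
        }
        return n;
}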

@@ -309,7 +309,7 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
         * racy then it does not matter what the result of the test
         * is, we re-check the list after having taken the lock anyway:
         */
-       if (rcu_dereference(nh->head)) {
+       if (rcu_dereference_raw(nh->head)) {
                down_read(&nh->rwsem);
                ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
                                        nr_calls);

@@ -367,7 +367,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
-               first = rcu_dereference(pid->tasks[type].first);
+               first = rcu_dereference_check(pid->tasks[type].first, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock));
                if (first)
                        result = hlist_entry(first, struct task_struct, pids[(type)].node);
        }

@@ -645,6 +645,11 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }

+#define for_each_domain_rd(p) \
+       rcu_dereference_check((p), \
+                             rcu_read_lock_sched_held() || \
+                             lockdep_is_held(&sched_domains_mutex))
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
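The scheduler's domain tree is not protected by rcu_read_lock(); readers rely on having preemption disabled, and detach_destroy_domains() waits with synchronize_sched(). Hence the condition above uses rcu_read_lock_sched_held() (or sched_domains_mutex on the update side) rather than rcu_read_lock_held(). A hedged sketch of a reader that would satisfy the check; my_count_domains() is hypothetical and would live in kernel/sched.c next to for_each_domain():

/*
 * Illustrative only: walk cpu's domain hierarchy with preemption disabled,
 * which is exactly what rcu_read_lock_sched_held() checks for.
 */
static int my_count_domains(int cpu)
{
        struct sched_domain *sd;
        int levels = 0;

        preempt_disable();      /* sched-RCU read-side critical section */
        for_each_domain(cpu, sd)
                levels++;
        preempt_enable();

        return levels;
}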

@@ -653,7 +658,7 @@ static inline int cpu_of(struct rq *rq)
  * preempt-disabled sections.
  */
 #define for_each_domain(cpu, __sd) \
-       for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+       for (__sd = for_each_domain_rd(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
 #define this_rq()              (&__get_cpu_var(runqueues))

@@ -1531,7 +1536,7 @@ static unsigned long target_load(int cpu, int type)

 static struct sched_group *group_of(int cpu)
 {
-       struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+       struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);

        if (!sd)
                return NULL;

@@ -4888,7 +4893,7 @@ static void run_rebalance_domains(struct softirq_action *h)

 static inline int on_null_domain(int cpu)
 {
-       return !rcu_dereference(cpu_rq(cpu)->sd);
+       return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }

 /*