Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 - improve rwsem scalability

 - add uninitialized rwsem debugging check

 - reduce lockdep's stacktrace memory usage and add diagnostics

 - misc cleanups, code consolidation and constification

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mutex: Fix up mutex_waiter usage
  locking/mutex: Use mutex flags macro instead of hard code
  locking/mutex: Make __mutex_owner static to mutex.c
  locking/qspinlock,x86: Clarify virt_spin_lock_key
  locking/rwsem: Check for operations on an uninitialized rwsem
  locking/rwsem: Make handoff writer optimistically spin on owner
  locking/lockdep: Report more stack trace statistics
  locking/lockdep: Reduce space occupied by stack traces
  stacktrace: Constify 'entries' arguments
  locking/lockdep: Make it clear that what lock_class::key points at is not modified
@@ -449,33 +449,101 @@ static void print_lockdep_off(const char *bug_msg)
 unsigned long nr_stack_trace_entries;
 
 #ifdef CONFIG_PROVE_LOCKING
+/**
+ * struct lock_trace - single stack backtrace
+ * @hash_entry: Entry in a stack_trace_hash[] list.
+ * @hash: jhash() of @entries.
+ * @nr_entries: Number of entries in @entries.
+ * @entries: Actual stack backtrace.
+ */
+struct lock_trace {
+	struct hlist_node	hash_entry;
+	u32			hash;
+	u32			nr_entries;
+	unsigned long		entries[0] __aligned(sizeof(unsigned long));
+};
+#define LOCK_TRACE_SIZE_IN_LONGS				\
+	(sizeof(struct lock_trace) / sizeof(unsigned long))
 /*
- * Stack-trace: tightly packed array of stack backtrace
- * addresses. Protected by the graph_lock.
+ * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
  */
 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
+static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
 
-static int save_trace(struct lock_trace *trace)
+static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
 {
-	unsigned long *entries = stack_trace + nr_stack_trace_entries;
+	return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
+		memcmp(t1->entries, t2->entries,
+		       t1->nr_entries * sizeof(t1->entries[0])) == 0;
+}
+
+static struct lock_trace *save_trace(void)
+{
+	struct lock_trace *trace, *t2;
+	struct hlist_head *hash_head;
+	u32 hash;
 	unsigned int max_entries;
 
-	trace->offset = nr_stack_trace_entries;
-	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
-	nr_stack_trace_entries += trace->nr_entries;
+	BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
+	BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
+
+	trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
+	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
+		LOCK_TRACE_SIZE_IN_LONGS;
+	trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
 
-	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
+	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES -
+	    LOCK_TRACE_SIZE_IN_LONGS - 1) {
 		if (!debug_locks_off_graph_unlock())
-			return 0;
+			return NULL;
 
 		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
 		dump_stack();
 
-		return 0;
+		return NULL;
 	}
 
-	return 1;
+	hash = jhash(trace->entries, trace->nr_entries *
+		     sizeof(trace->entries[0]), 0);
+	trace->hash = hash;
+	hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
+	hlist_for_each_entry(t2, hash_head, hash_entry) {
+		if (traces_identical(trace, t2))
+			return t2;
+	}
+	nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
+	hlist_add_head(&trace->hash_entry, hash_head);
+
+	return trace;
 }
+
+/* Return the number of stack traces in the stack_trace[] array. */
+u64 lockdep_stack_trace_count(void)
+{
+	struct lock_trace *trace;
+	u64 c = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
+		hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
+			c++;
+		}
+	}
+
+	return c;
+}
+
+/* Return the number of stack hash chains that have at least one stack trace. */
+u64 lockdep_stack_hash_count(void)
+{
+	u64 c = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
+		if (!hlist_empty(&stack_trace_hash[i]))
+			c++;
+
+	return c;
+}
 #endif
 
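The hunk above replaces lockdep's append-only stack_trace[] array with deduplicated, variable-length lock_trace records: save_trace() provisionally carves a record out of the same static pool, hashes its entries, and returns an already-stored identical trace instead of consuming new pool space. The userspace sketch below mirrors that pool-plus-hash-chain scheme; the names (pool, trace_save, hash_entries) are made up for illustration, a singly linked chain stands in for the kernel's hlist, and FNV-1a stands in for jhash(), so this is a sketch of the idea, not the kernel code.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define POOL_LONGS 1024
#define HASH_SIZE  64   /* must stay a power of two for the mask below */

struct trace {
	struct trace *next;     /* hash-chain link (the kernel uses an hlist) */
	uint32_t hash;
	uint32_t nr_entries;
	unsigned long entries[];
};

static unsigned long pool[POOL_LONGS];
static unsigned long pool_used;
static struct trace *hash_heads[HASH_SIZE];

/* FNV-1a over the raw entry bytes; a stand-in for jhash(). */
static uint32_t hash_entries(const unsigned long *e, uint32_t n)
{
	const unsigned char *p = (const unsigned char *)e;
	uint32_t h = 2166136261u;
	size_t i;

	for (i = 0; i < n * sizeof(*e); i++)
		h = (h ^ p[i]) * 16777619u;
	return h;
}

static struct trace *trace_save(const unsigned long *e, uint32_t n)
{
	size_t hdr = sizeof(struct trace) / sizeof(unsigned long);
	struct trace *t, *t2;

	if (pool_used + hdr + n >= POOL_LONGS)
		return NULL;    /* pool exhausted, like MAX_STACK_TRACE_ENTRIES */

	/* Build the candidate record at the current high-water mark. */
	t = (struct trace *)(pool + pool_used);
	memcpy(t->entries, e, n * sizeof(*e));
	t->nr_entries = n;
	t->hash = hash_entries(e, n);

	/* Walk the hash chain: an identical trace is reused for free. */
	for (t2 = hash_heads[t->hash & (HASH_SIZE - 1)]; t2; t2 = t2->next)
		if (t2->hash == t->hash && t2->nr_entries == n &&
		    !memcmp(t2->entries, e, n * sizeof(*e)))
			return t2;

	/* New trace: commit the pool space and link it into the table. */
	pool_used += hdr + n;
	t->next = hash_heads[t->hash & (HASH_SIZE - 1)];
	hash_heads[t->hash & (HASH_SIZE - 1)] = t;
	return t;
}

int main(void)
{
	unsigned long bt[] = { 0x1000, 0x2000, 0x3000 };
	struct trace *a = trace_save(bt, 3);
	struct trace *b = trace_save(bt, 3);

	printf("deduplicated: %s\n", a == b ? "yes" : "no"); /* prints "yes" */
	return 0;
}

As in the kernel version, a duplicate costs no pool space at all: the candidate is written at the high-water mark, but pool_used only advances when the trace turns out to be new.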
@@ -511,7 +579,7 @@ static const char *usage_str[] =
 };
 #endif
 
-const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
+const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
 {
 	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 }
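__get_key_name() only reads the key and immediately converts the pointer to an address for kallsyms_lookup(), which is why constifying the parameter costs callers nothing. A minimal sketch of the same const-correct shape, with a stub standing in for kallsyms_lookup() and hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for kallsyms_lookup(): resolves an address to a name. */
static const char *name_for(uintptr_t addr)
{
	return addr ? "example_key" : "?";
}

/* Taking a const pointer promises callers the key is never written. */
static const char *get_key_name(const int *key)
{
	return name_for((uintptr_t)key);  /* cast uses the address only */
}

int main(void)
{
	static const int key = 0;

	printf("%s\n", get_key_name(&key));
	return 0;
}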
@@ -1235,7 +1303,7 @@ static struct lock_list *alloc_list_entry(void)
 static int add_lock_to_list(struct lock_class *this,
			     struct lock_class *links_to, struct list_head *head,
			     unsigned long ip, int distance,
-			    struct lock_trace *trace)
+			    const struct lock_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -1249,7 +1317,7 @@ static int add_lock_to_list(struct lock_class *this,
 	entry->class = this;
 	entry->links_to = links_to;
 	entry->distance = distance;
-	entry->trace = *trace;
+	entry->trace = trace;
 	/*
 	 * Both allocation and removal are done under the graph lock; but
 	 * iteration is under RCU-sched; see look_up_lock_class() and
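The change from "entry->trace = *trace" to "entry->trace = trace" is where the memory saving lands: struct lock_list previously embedded a whole lock_trace per dependency edge, and now stores one pointer to a shared, deduplicated record. A compilable sketch of the size difference, with illustrative struct shapes (the real lock_list and lock_trace carry more fields):

#include <stdio.h>

struct trace_rec { unsigned int nr_entries; unsigned long entries[16]; };

struct entry_embedded { struct trace_rec trace; };        /* old: copy per entry */
struct entry_shared   { const struct trace_rec *trace; }; /* new: shared pointer */

int main(void)
{
	printf("embedded: %zu bytes/entry, shared: %zu bytes/entry\n",
	       sizeof(struct entry_embedded), sizeof(struct entry_shared));
	return 0;
}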
@@ -1470,11 +1538,10 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
 
 }
 
-static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+static void print_lock_trace(const struct lock_trace *trace,
+			     unsigned int spaces)
 {
-	unsigned long *entries = stack_trace + trace->offset;
-
-	stack_trace_print(entries, trace->nr_entries, spaces);
+	stack_trace_print(trace->entries, trace->nr_entries, spaces);
 }
 
 /*
@@ -1489,7 +1556,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
 	printk(KERN_CONT ":\n");
-	print_lock_trace(&target->trace, 6);
+	print_lock_trace(target->trace, 6);
 }
 
 static void
@@ -1592,7 +1659,8 @@ static noinline void print_circular_bug(struct lock_list *this,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;
 
-	if (!save_trace(&this->trace))
+	this->trace = save_trace();
+	if (!this->trace)
 		return;
 
 	depth = get_lock_depth(target);
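save_trace() also changed shape here: instead of filling a caller-owned struct and returning 0/1, it returns a pointer to the (possibly shared) record, or NULL when the pool is exhausted. A minimal sketch of the old and new calling conventions, using hypothetical names:

struct rec { int n; };

/* Old shape: int save(struct rec *out) -- caller keeps a private copy. */

/* New shape: the callee hands back a shared record (NULL on failure). */
static struct rec *save(void)
{
	static struct rec shared = { 1 };  /* pretend this is the saved trace */

	return &shared;
}

int use(void)
{
	struct rec *trace = save();

	if (!trace)                 /* mirrors "if (!this->trace) return;" */
		return 0;
	return trace->n;
}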
@@ -1715,7 +1783,7 @@ check_path(struct lock_class *target, struct lock_list *src_entry,
  */
 static noinline int
 check_noncircular(struct held_lock *src, struct held_lock *target,
-		  struct lock_trace *trace)
+		  struct lock_trace **const trace)
 {
 	int ret;
 	struct lock_list *uninitialized_var(target_entry);
@@ -1729,13 +1797,13 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
 	ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
 	if (unlikely(!ret)) {
-		if (!trace->nr_entries) {
+		if (!*trace) {
 			/*
 			 * If save_trace fails here, the printing might
 			 * trigger a WARN but because of the !nr_entries it
 			 * should not do bad things.
 			 */
-			save_trace(trace);
+			*trace = save_trace();
 		}
 
 		print_circular_bug(&src_entry, target_entry, src, target);
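check_noncircular() now takes "struct lock_trace **const trace" so a whole chain of checks can share one lazily saved trace: the caller initializes the pointer to NULL and whichever check first needs a trace saves it exactly once. A self-contained sketch of that out-parameter pattern (get_trace() is a hypothetical stand-in for save_trace()):

#include <stdio.h>

struct rec { int id; };

static struct rec *get_trace(void)
{
	static struct rec r = { 42 };   /* pretend this saves a backtrace */

	return &r;
}

static int check_step(struct rec **trace)
{
	if (!*trace) {
		*trace = get_trace();   /* saved at most once */
		if (!*trace)
			return 0;
	}
	return 1;
}

int main(void)
{
	struct rec *trace = NULL;       /* like check_prevs_add()'s local */

	check_step(&trace);
	check_step(&trace);             /* reuses the first save */
	printf("trace id %d\n", trace->id);
	return 0;
}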
@@ -1859,7 +1927,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 			len += printk("%*s %s", depth, "", usage_str[bit]);
 			len += printk(KERN_CONT " at:\n");
-			print_lock_trace(class->usage_traces + bit, len);
+			print_lock_trace(class->usage_traces[bit], len);
 		}
 	}
 	printk("%*s }\n", depth, "");
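The call-site change from "usage_traces + bit" to "usage_traces[bit]" follows from lock_class::usage_traces becoming an array of pointers to shared traces rather than an array of embedded structs. A compilable sketch of the two shapes, with assumed field names and types:

struct trace_rec { unsigned int nr_entries; };

struct class_old { struct trace_rec  usage_traces[4]; };  /* embedded */
struct class_new { struct trace_rec *usage_traces[4]; };  /* shared   */

const struct trace_rec *get_old(const struct class_old *c, int bit)
{
	return c->usage_traces + bit;   /* address of the embedded element */
}

const struct trace_rec *get_new(const struct class_new *c, int bit)
{
	return c->usage_traces[bit];    /* the stored pointer itself */
}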
@@ -1884,7 +1952,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 	do {
 		print_lock_class_header(entry->class, depth);
 		printk("%*s ... acquired at:\n", depth, "");
-		print_lock_trace(&entry->trace, 2);
+		print_lock_trace(entry->trace, 2);
 		printk("\n");
 
 		if (depth == 0 && (entry != root)) {
@@ -1995,14 +2063,14 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_lock_name(backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
+	print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
-	print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
+	print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
 
 	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -2011,13 +2079,15 @@ print_bad_irq_dependency(struct task_struct *curr,
 	lockdep_print_held_locks(curr);
 
 	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
-	if (!save_trace(&prev_root->trace))
+	prev_root->trace = save_trace();
+	if (!prev_root->trace)
 		return;
 	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
 	pr_warn("\nthe dependencies between the lock to be acquired");
 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
-	if (!save_trace(&next_root->trace))
+	next_root->trace = save_trace();
+	if (!next_root->trace)
 		return;
 	print_shortest_lock_dependencies(forwards_entry, next_root);
 
@@ -2369,7 +2439,8 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, struct lock_trace *trace)
+	       struct held_lock *next, int distance,
+	       struct lock_trace **const trace)
 {
 	struct lock_list *entry;
 	int ret;
@@ -2444,8 +2515,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return ret;
 #endif
 
-	if (!trace->nr_entries && !save_trace(trace))
-		return 0;
+	if (!*trace) {
+		*trace = save_trace();
+		if (!*trace)
+			return 0;
+	}
 
 	/*
 	 * Ok, all validations passed, add the new lock
@@ -2453,14 +2527,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
			       &hlock_class(prev)->locks_after,
-			       next->acquire_ip, distance, trace);
+			       next->acquire_ip, distance, *trace);
 
 	if (!ret)
 		return 0;
 
 	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
			       &hlock_class(next)->locks_before,
-			       next->acquire_ip, distance, trace);
+			       next->acquire_ip, distance, *trace);
 	if (!ret)
 		return 0;
 
@@ -2476,7 +2550,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
-	struct lock_trace trace = { .nr_entries = 0 };
+	struct lock_trace *trace = NULL;
 	int depth = curr->lockdep_depth;
 	struct held_lock *hlock;
 
@@ -3015,7 +3089,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+	print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
 
 	print_irqtrace_events(curr);
 	pr_warn("\nother info that might help us debug this:\n");
@@ -3096,7 +3170,8 @@ print_irq_inversion_bug(struct task_struct *curr,
 	lockdep_print_held_locks(curr);
 
 	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
-	if (!save_trace(&root->trace))
+	root->trace = save_trace();
+	if (!root->trace)
 		return;
 	print_shortest_lock_dependencies(other, root);
 
@@ -3580,7 +3655,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 
 	hlock_class(this)->usage_mask |= new_mask;
 
-	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
+	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
 		return 0;
 
 	switch (new_bit) {
@@ -5157,6 +5232,12 @@ void __init lockdep_init(void)
 		) / 1024
 		);
 
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+	printk(" memory used for stack traces: %zu kB\n",
+	       (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
+	       );
+#endif
+
 	printk(" per task-struct memory footprint: %zu bytes\n",
 	       sizeof(((struct task_struct *)NULL)->held_locks));
 }
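The new lockdep_init() printout reports the static footprint of both the trace pool and the hash table. A quick userspace check of the same arithmetic, with illustrative array sizes (the kernel's MAX_STACK_TRACE_ENTRIES and STACK_TRACE_HASH_SIZE depend on Kconfig, so the constants below are assumptions, not the kernel's values):

#include <stdio.h>

#define MAX_STACK_TRACE_ENTRIES (128 * 1024)   /* illustrative */
#define STACK_TRACE_HASH_SIZE   (16 * 1024)    /* illustrative */

static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static void *stack_trace_hash[STACK_TRACE_HASH_SIZE];

int main(void)
{
	/* On LP64: 128Ki longs * 8 B + 16Ki pointers * 8 B = 1024 kB + 128 kB */
	printf(" memory used for stack traces: %zu kB\n",
	       (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024);
	return 0;
}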