Merge tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull debugobjects fixes from Thomas Gleixner:
 "Two fixes for debugobjects:

   - Prevent the allocation path from waking up kswapd.

     That's a long standing issue due to the GFP_ATOMIC allocation flag.
     As debug objects can be invoked from pretty much any context, waking
     kswapd can end up in arbitrary lock chains versus the waitqueue lock.

   - Correct the explicit lockdep wait-type violation in
     debug_object_fill_pool()"

* tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  debugobjects: Don't wake up kswapd from fill_pool()
  debugobjects,locking: Annotate debug_object_fill_pool() wait type violation
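The first fix hinges on a GFP-flag detail: GFP_ATOMIC expands to
__GFP_HIGH | __GFP_KSWAPD_RECLAIM, so any allocation made with it may wake
kswapd. Below is a minimal sketch of the idea; debug_obj_alloc_nowake is a
hypothetical helper, not the upstream function -- only the flag relationship
is taken from the changelog:

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Hypothetical helper illustrating the first fix: GFP_ATOMIC implies
 * __GFP_KSWAPD_RECLAIM, and waking kswapd takes its waitqueue lock,
 * which is what creates the arbitrary lock chains mentioned above.
 * Dropping the reclaim bit keeps the allocation atomic (no sleeping)
 * without ever touching that waitqueue.
 */
static struct debug_obj *debug_obj_alloc_nowake(struct kmem_cache *cache)
{
	/* Access to reserves, no warning spam, no __GFP_KSWAPD_RECLAIM. */
	const gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;

	return kmem_cache_zalloc(cache, gfp);
}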
kernel/locking/lockdep.c

@@ -2263,6 +2263,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
 
 static inline bool usage_skip(struct lock_list *entry, void *mask)
 {
+	if (entry->class->lock_type == LD_LOCK_NORMAL)
+		return false;
+
 	/*
 	 * Skip local_lock() for irq inversion detection.
 	 *
@@ -2289,14 +2292,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
 	 * As a result, we will skip local_lock(), when we search for irq
 	 * inversion bugs.
 	 */
-	if (entry->class->lock_type == LD_LOCK_PERCPU) {
-		if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
-			return false;
+	if (entry->class->lock_type == LD_LOCK_PERCPU &&
+	    DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
+		return false;
 
-		return true;
-	}
+	/*
+	 * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
+	 * a lock and only used to override the wait_type.
+	 */
 
-	return false;
+	return true;
 }
 
 /*
@@ -4768,7 +4773,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 
 	for (; depth < curr->lockdep_depth; depth++) {
 		struct held_lock *prev = curr->held_locks + depth;
-		u8 prev_inner = hlock_class(prev)->wait_type_inner;
+		struct lock_class *class = hlock_class(prev);
+		u8 prev_inner = class->wait_type_inner;
 
 		if (prev_inner) {
 			/*
@@ -4778,6 +4784,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 			 * Also due to trylocks.
 			 */
 			curr_inner = min(curr_inner, prev_inner);
+
+			/*
+			 * Allow override for annotations -- this is typically
+			 * only valid/needed for code that only exists when
+			 * CONFIG_PREEMPT_RT=n.
+			 */
+			if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
+				curr_inner = prev_inner;
 		}
 	}
 
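For the second fix, the lockdep hunks above add handling for the
LD_LOCK_WAIT_OVERRIDE pseudo-lock type: usage_skip() now ignores such
entries for irq inversion detection, and check_wait_context() lets an
override raise curr_inner back up instead of only ever taking the minimum.
A hedged sketch of what the caller-side annotation looks like, following
the DEFINE_WAIT_OVERRIDE_MAP / lock_map_acquire_try pattern this series
introduces; the surrounding function is illustrative, not the exact
lib/debugobjects.c code:

#include <linux/lockdep.h>

/*
 * A wait-type override map: its lock_type is LD_LOCK_WAIT_OVERRIDE, so
 * check_wait_context() (last hunk above) adopts its inner wait type for
 * the covered scope, and usage_skip() leaves it out of irq inversion
 * detection -- it is not a real lock.
 */
static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);

static void fill_pool_annotated(void)
{
	/*
	 * Tell lockdep that everything in this scope is allowed to take
	 * sleeping-wait-type locks, documenting the explicit wait-type
	 * violation the changelog refers to rather than hiding it.
	 */
	lock_map_acquire_try(&fill_pool_map);
	fill_pool();	/* stand-in for the actual pool refill */
	lock_map_release(&fill_pool_map);
}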