/* 2021-08-16 00:29:00 +03:00 */
// SPDX-License-Identifier: GPL-2.0-only
/*
* rtmutex API
*/
# include <linux/spinlock.h>
# include <linux/export.h>
# define RT_MUTEX_BUILD_MUTEX
# define WW_RT
# include "rtmutex.c"
/*
 * kernel/locking: Add context to ww_mutex_trylock()
 *
 * i915 will soon gain an eviction path that trylocks a whole lot of locks
 * for eviction, getting dmesg failures like below:
 *
 *   BUG: MAX_LOCK_DEPTH too low!
 *   turning off the locking correctness validator.
 *   depth: 48 max: 48!
 *   48 locks held by i915_selftest/5776:
 *    #0: ffff888101a79240 (&dev->mutex){....}-{3:3}, at: __driver_attach+0x88/0x160
 *    #1: ffffc900009778c0 (reservation_ww_class_acquire){+.+.}-{0:0}, at: i915_vma_pin.constprop.63+0x39/0x1b0 [i915]
 *    #2: ffff88800cf74de8 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_vma_pin.constprop.63+0x5f/0x1b0 [i915]
 *    #3: ffff88810c7f9e38 (&vm->mutex/1){+.+.}-{3:3}, at: i915_vma_pin_ww+0x1c4/0x9d0 [i915]
 *    #4: ffff88810bad5768 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_gem_evict_something+0x110/0x860 [i915]
 *    #5: ffff88810bad60e8 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_gem_evict_something+0x110/0x860 [i915]
 *   ...
 *    #46: ffff88811964d768 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_gem_evict_something+0x110/0x860 [i915]
 *    #47: ffff88811964e0e8 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_gem_evict_something+0x110/0x860 [i915]
 *   INFO: lockdep is turned off.
 *
 * Fixing eviction to nest into ww_class_acquire is a high priority, but
 * it requires a rework of the entire driver, which can only be done one
 * step at a time.
 *
 * As an intermediate solution, add an acquire context to
 * ww_mutex_trylock(), which allows us to do proper nesting annotations on
 * the trylocks, making the above lockdep splat disappear.
 *
 * This is also useful in regulator_lock_nested(), which may avoid dropping
 * regulator_nesting_mutex in the uncontended path, so use it there.
 *
 * TTM may be another user for this, where we could lock a buffer in a
 * fastpath with list locks held, without dropping all locks we hold.
 *
 * [peterz: rework actual ww_mutex_trylock() implementations]
 * Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
 * Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
 * Link: https://lkml.kernel.org/r/YUBGPdDDjKlxAuXJ@hirez.programming.kicks-ass.net
 * 2021-09-09 12:32:18 +03:00
 */
int ww_mutex_trylock ( struct ww_mutex * lock , struct ww_acquire_ctx * ww_ctx )
{
struct rt_mutex * rtm = & lock - > base ;
if ( ! ww_ctx )
return rt_mutex_trylock ( rtm ) ;
/*
* Reset the wounded flag after a kill . No other process can
* race and wound us here , since they can ' t have a valid owner
* pointer if we don ' t have any locks held .
*/
if ( ww_ctx - > acquired = = 0 )
ww_ctx - > wounded = 0 ;
if ( __rt_mutex_trylock ( & rtm - > rtmutex ) ) {
ww_mutex_set_context_fastpath ( lock , ww_ctx ) ;
2021-11-04 15:27:06 +03:00
mutex_acquire_nest ( & rtm - > dep_map , 0 , 1 , & ww_ctx - > dep_map , _RET_IP_ ) ;
kernel/locking: Add context to ww_mutex_trylock()
i915 will soon gain an eviction path that trylock a whole lot of locks
for eviction, getting dmesg failures like below:
BUG: MAX_LOCK_DEPTH too low!
turning off the locking correctness validator.
depth: 48 max: 48!
48 locks held by i915_selftest/5776:
#0: ffff888101a79240 (&dev->mutex){....}-{3:3}, at: __driver_attach+0x88/0x160
#1: ffffc900009778c0 (reservation_ww_class_acquire){+.+.}-{0:0}, at: i915_vma_pin.constprop.63+0x39/0x1b0 [i915]
#2: ffff88800cf74de8 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_vma_pin.constprop.63+0x5f/0x1b0 [i915]
#3: ffff88810c7f9e38 (&vm->mutex/1){+.+.}-{3:3}, at: i915_vma_pin_ww+0x1c4/0x9d0 [i915]
#4: ffff88810bad5768 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_gem_evict_something+0x110/0x860 [i915]
#5: ffff88810bad60e8 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_gem_evict_something+0x110/0x860 [i915]
...
#46: ffff88811964d768 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_gem_evict_something+0x110/0x860 [i915]
#47: ffff88811964e0e8 (reservation_ww_class_mutex){+.+.}-{3:3}, at: i915_gem_evict_something+0x110/0x860 [i915]
INFO: lockdep is turned off.
Fixing eviction to nest into ww_class_acquire is a high priority, but
it requires a rework of the entire driver, which can only be done one
step at a time.
As an intermediate solution, add an acquire context to
ww_mutex_trylock, which allows us to do proper nesting annotations on
the trylocks, making the above lockdep splat disappear.
This is also useful in regulator_lock_nested, which may avoid dropping
regulator_nesting_mutex in the uncontended path, so use it there.
TTM may be another user for this, where we could lock a buffer in a
fastpath with list locks held, without dropping all locks we hold.
[peterz: rework actual ww_mutex_trylock() implementations]
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/YUBGPdDDjKlxAuXJ@hirez.programming.kicks-ass.net
2021-09-09 12:32:18 +03:00
return 1 ;
}
return 0 ;
}
EXPORT_SYMBOL ( ww_mutex_trylock ) ;
/* 2021-08-16 00:29:00 +03:00 */
/*
 * __ww_rt_mutex_lock - common slow/fast path for the ww_mutex lock variants
 * @lock:	the ww_mutex to acquire
 * @ww_ctx:	optional acquire context; may be NULL
 * @state:	task sleep state (TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE)
 * @ip:		caller return address for lockdep bookkeeping
 *
 * Returns 0 on success, -EALREADY if @ww_ctx already holds @lock, or the
 * error from rt_mutex_slowlock() (e.g. -EDEADLK / -EINTR) on failure.
 *
 * NOTE(review): the ordering below is deliberate — the lockdep acquire is
 * annotated before the cmpxchg fast path, and released again on slowlock
 * failure; do not reorder.
 */
static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	/* Only used by lockdep (CONFIG_DEBUG_LOCK_ALLOC), hence __maybe_unused. */
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
		/* A context may acquire a given lock only once. */
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	/* Uncontended fast path: grab ownership with a single cmpxchg. */
	if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	/* Undo the lockdep annotation if the slow path did not acquire. */
	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}
/**
 * ww_mutex_lock - acquire the w/w mutex, sleeping uninterruptibly
 * @lock:	the ww_mutex to acquire
 * @ctx:	optional acquire context; may be NULL
 *
 * Returns 0 on success, -EALREADY if @ctx already holds @lock, or
 * -EDEADLK if the wound/wait protocol requires the caller to back off.
 */
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock);
/**
 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible sleep
 * @lock:	the ww_mutex to acquire
 * @ctx:	optional acquire context; may be NULL
 *
 * Like ww_mutex_lock(), but the sleep may be interrupted by a signal,
 * in which case the error from rt_mutex_slowlock() is returned.
 */
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock:	the ww_mutex to release
 *
 * NOTE(review): ordering is deliberate — ww bookkeeping first, then the
 * lockdep release, then the actual rtmutex unlock; do not reorder.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	struct rt_mutex *rtm = &lock->base;

	__ww_mutex_unlock(lock);
	mutex_release(&rtm->dep_map, _RET_IP_);
	__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);