/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_print.h>
/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine grained locking, and atomic ioctl where
 * userspace can indirectly control locking order, it becomes necessary
 * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx. This is provided
 * by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
 *
 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.rst
 *
 * The basic usage pattern is to::
 *
 *     drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
 *     retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, ctx)
 *         if (ret == -EDEADLK) {
 *             ret = drm_modeset_backoff(ctx);
 *             if (!ret)
 *                 goto retry;
 *         }
 *         if (ret)
 *             goto out;
 *     }
 *     ... do stuff ...
 *     out:
 *     drm_modeset_drop_locks(ctx);
 *     drm_modeset_acquire_fini(ctx);
 *
 * For convenience this control flow is implemented in
 * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
 * where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
 *
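 * A minimal sketch of those convenience macros (the flag choice and the
 * body shown here are illustrative, not required)::
 *
 *     DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
 *     ... do stuff with all modeset locks held ...
 *     DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 *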
 * If all that is needed is a single modeset lock, then the &struct
 * drm_modeset_acquire_ctx is not needed and the locking can be simplified
 * by passing a NULL instead of ctx in the drm_modeset_lock() call or
 * calling drm_modeset_lock_single_interruptible(). To unlock afterwards
 * call drm_modeset_unlock().
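 *
 * For instance, a minimal sketch of that simplified single-lock case (the
 * particular lock taken here is illustrative)::
 *
 *     drm_modeset_lock(&crtc->mutex, NULL);
 *     ... do stuff protected by the CRTC lock ...
 *     drm_modeset_unlock(&crtc->mutex);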
 *
 * On top of these per-object locks using &ww_mutex there's also an overall
 * &drm_mode_config.mutex, for protecting everything else. Mostly this means
 * probe state of connectors, and preventing hotplug add/removal of connectors.
 *
 * Finally there's a bunch of dedicated locks to protect drm core internal
 * lists and lookup data structures.
 */
static DEFINE_WW_CLASS(crtc_ww_class);

#if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK)
static noinline depot_stack_handle_t __drm_stack_depot_save(void)
{
	unsigned long entries[8];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
	struct drm_printer p = drm_debug_printer("drm_modeset_lock");
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	nr_entries = stack_depot_fetch(stack_depot, &entries);
	stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 2);

	drm_printf(&p, "attempting to lock a contended lock without backoff:\n%s", buf);

	kfree(buf);
}
#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
static depot_stack_handle_t __drm_stack_depot_save(void)
{
	return 0;
}

static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
}
#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
/**
 * drm_modeset_lock_all - take all modeset locks
 * @dev: DRM device
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented. Locks must be dropped by calling the
 * drm_modeset_unlock_all() function.
 *
 * This function is deprecated. It allocates a lock acquisition context and
 * stores it in &drm_device.mode_config. This facilitates conversion of
 * existing code because it removes the need to manually deal with the
 * acquisition context, but it is also brittle because the context is global
 * and care must be taken not to nest calls. New code should use the
 * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
 */
void drm_modeset_lock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
	if (WARN_ON(!ctx))
		return;

	mutex_lock(&config->mutex);

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, ctx);
	if (ret < 0) {
		if (ret == -EDEADLK) {
			drm_modeset_backoff(ctx);
			goto retry;
		}

		drm_modeset_acquire_fini(ctx);
		kfree(ctx);
		return;
	}
	ww_acquire_done(&ctx->ww_ctx);

	WARN_ON(config->acquire_ctx);

	/*
	 * We hold the locks now, so it is safe to stash the acquisition
	 * context for drm_modeset_unlock_all().
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
/**
 * drm_modeset_unlock_all - drop all modeset locks
 * @dev: DRM device
 *
 * This function drops all modeset locks taken by a previous call to the
 * drm_modeset_lock_all() function.
 *
 * This function is deprecated. It uses the lock acquisition context stored
 * in &drm_device.mode_config. This facilitates conversion of existing
 * code because it removes the need to manually deal with the acquisition
 * context, but it is also brittle because the context is global and care must
 * be taken not to nest calls. New code should pass the acquisition context
 * directly to the drm_modeset_drop_locks() function.
 */
void drm_modeset_unlock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	config->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);

	mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	drm_for_each_crtc(crtc, dev)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
 *
 * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
 * all calls to drm_modeset_lock() will perform an interruptible
 * wait.
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags)
{
	memset(ctx, 0, sizeof(*ctx));
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);

	if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
		ctx->interruptible = true;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);
/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);

/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	if (WARN_ON(ctx->contended))
		__drm_stack_depot_print(ctx->stack_depot);

	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock *lock;

		lock = list_first_entry(&ctx->locked,
				struct drm_modeset_lock, head);

		drm_modeset_unlock(lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);
static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	if (WARN_ON(ctx->contended))
		__drm_stack_depot_print(ctx->stack_depot);

	/*
	 * Pick the ww_mutex acquisition variant matching the context:
	 * trylock-only contexts must not block at all, while the
	 * interruptible and slow (post-backoff) flags select among the
	 * blocking lock flavours below.
	 */
	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex, NULL))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
		ctx->stack_depot = __drm_stack_depot_save();
	}

	return ret;
}
/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 *
 * This function returns 0 on success, or -ERESTARTSYS if this context
 * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
 * wait has been interrupted.
 */
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_modeset_lock *contended = ctx->contended;

	ctx->contended = NULL;
	ctx->stack_depot = 0;

	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);
/**
 * drm_modeset_lock_init - initialize lock
 * @lock: lock to init
 */
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
	ww_mutex_init(&lock->mutex, &crtc_ww_class);
	INIT_LIST_HEAD(&lock->head);
}
EXPORT_SYMBOL(drm_modeset_lock_init);
/**
 * drm_modeset_lock - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * If @ctx is not NULL, then its ww acquire context is used and the
 * lock will be tracked by the context and can be released by calling
 * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
 * deadlock scenario has been detected and it is an error to attempt
 * to take any more locks without first calling drm_modeset_backoff().
 *
 * If @ctx is not NULL and initialized with
 * %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
 * -ERESTARTSYS when interrupted.
 *
 * If @ctx is NULL then the function call behaves like a normal,
 * uninterruptible non-nesting mutex_lock() call.
 */
int drm_modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, ctx->interruptible, false);

	ww_mutex_lock(&lock->mutex, NULL);
	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);
/**
 * drm_modeset_lock_single_interruptible - take a single modeset lock
 * @lock: lock to take
 *
 * This function behaves as drm_modeset_lock() with a NULL context,
 * but performs interruptible waits.
 *
 * This function returns 0 on success, or -ERESTARTSYS when interrupted.
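 *
 * A minimal usage sketch (the particular lock taken here is illustrative)::
 *
 *     ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
 *     if (ret)
 *         return ret;
 *     ... do stuff ...
 *     drm_modeset_unlock(&crtc->mutex);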
 */
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);
/**
 * drm_modeset_lock_all_ctx - take all modeset locks
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented.
 *
 * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
 * since that lock isn't required for modeset state changes. Callers which
 * need to grab that lock too need to do so outside of the acquire context
 * @ctx.
 *
 * Locks acquired with this function should be released by calling the
 * drm_modeset_drop_locks() function on @ctx.
 *
 * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int drm_modeset_lock_all_ctx(struct drm_device *dev,
		struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_private_obj *privobj;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret;

	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_plane(plane, dev) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_privobj(privobj, dev) {
		ret = drm_modeset_lock(&privobj->lock, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);