2013-11-19 21:10:12 +04:00
/*
* Copyright ( C ) 2014 Red Hat
* Author : Rob Clark < robdclark @ gmail . com >
*
* Permission is hereby granted , free of charge , to any person obtaining a
* copy of this software and associated documentation files ( the " Software " ) ,
* to deal in the Software without restriction , including without limitation
* the rights to use , copy , modify , merge , publish , distribute , sublicense ,
* and / or sell copies of the Software , and to permit persons to whom the
* Software is furnished to do so , subject to the following conditions :
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
* IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL
* THE COPYRIGHT HOLDER ( S ) OR AUTHOR ( S ) BE LIABLE FOR ANY CLAIM , DAMAGES OR
* OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE ,
* ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE .
*/
2018-10-22 15:31:22 +03:00
# include <drm/drm_atomic.h>
2013-11-19 21:10:12 +04:00
# include <drm/drm_crtc.h>
2019-05-26 20:35:35 +03:00
# include <drm/drm_device.h>
2013-11-19 21:10:12 +04:00
# include <drm/drm_modeset_lock.h>
/**
* DOC : kms locking
*
* As KMS moves toward more fine grained locking , and atomic ioctl where
* userspace can indirectly control locking order , it becomes necessary
2016-05-30 12:10:49 +03:00
* to use & ww_mutex and acquire - contexts to avoid deadlocks . But because
2013-11-19 21:10:12 +04:00
* the locking is more distributed around the driver code , we want a bit
* of extra utility / tracking out of our acquire - ctx . This is provided
2017-01-25 09:26:45 +03:00
* by & struct drm_modeset_lock and & struct drm_modeset_acquire_ctx .
2013-11-19 21:10:12 +04:00
*
2016-05-30 12:10:49 +03:00
 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
2013-11-19 21:10:12 +04:00
*
2016-05-31 23:55:13 +03:00
 * The basic usage pattern is to::
2013-11-19 21:10:12 +04:00
*
2017-09-12 16:37:44 +03:00
* drm_modeset_acquire_init ( ctx , DRM_MODESET_ACQUIRE_INTERRUPTIBLE )
2015-11-25 20:07:55 +03:00
* retry :
2013-11-19 21:10:12 +04:00
* foreach ( lock in random_ordered_set_of_locks ) {
2017-09-12 16:37:44 +03:00
* ret = drm_modeset_lock ( lock , ctx )
2015-11-25 20:07:55 +03:00
* if ( ret = = - EDEADLK ) {
2017-09-12 16:37:44 +03:00
* ret = drm_modeset_backoff ( ctx ) ;
* if ( ! ret )
* goto retry ;
2015-11-25 20:07:55 +03:00
* }
2017-09-12 16:37:44 +03:00
* if ( ret )
* goto out ;
2013-11-19 21:10:12 +04:00
* }
* . . . do stuff . . .
2017-09-12 16:37:44 +03:00
* out :
* drm_modeset_drop_locks ( ctx ) ;
* drm_modeset_acquire_fini ( ctx ) ;
2016-05-30 12:10:49 +03:00
*
2018-11-29 18:04:17 +03:00
* For convenience this control flow is implemented in
* DRM_MODESET_LOCK_ALL_BEGIN ( ) and DRM_MODESET_LOCK_ALL_END ( ) for the case
* where all modeset locks need to be taken through drm_modeset_lock_all_ctx ( ) .
*
2017-07-20 19:07:48 +03:00
* If all that is needed is a single modeset lock , then the & struct
* drm_modeset_acquire_ctx is not needed and the locking can be simplified
2017-09-12 16:37:44 +03:00
* by passing a NULL instead of ctx in the drm_modeset_lock ( ) call or
* calling drm_modeset_lock_single_interruptible ( ) . To unlock afterwards
* call drm_modeset_unlock ( ) .
2017-07-20 19:07:48 +03:00
*
* On top of these per - object locks using & ww_mutex there ' s also an overall
2017-01-25 09:26:45 +03:00
* & drm_mode_config . mutex , for protecting everything else . Mostly this means
2016-11-29 12:24:40 +03:00
* probe state of connectors , and preventing hotplug add / removal of connectors .
2016-05-30 12:10:49 +03:00
*
2016-11-29 12:24:40 +03:00
* Finally there ' s a bunch of dedicated locks to protect drm core internal
* lists and lookup data structures .
2013-11-19 21:10:12 +04:00
*/
2016-11-15 01:40:57 +03:00
static DEFINE_WW_CLASS ( crtc_ww_class ) ;
2014-07-25 19:47:18 +04:00
/**
2015-07-28 14:18:42 +03:00
* drm_modeset_lock_all - take all modeset locks
2015-12-02 19:50:03 +03:00
* @ dev : DRM device
2014-07-27 21:09:33 +04:00
*
2015-07-28 14:18:42 +03:00
* This function takes all modeset locks , suitable where a more fine - grained
2015-12-02 19:50:03 +03:00
* scheme isn ' t ( yet ) implemented . Locks must be dropped by calling the
* drm_modeset_unlock_all ( ) function .
*
* This function is deprecated . It allocates a lock acquisition context and
2017-01-25 09:26:45 +03:00
* stores it in & drm_device . mode_config . This facilitate conversion of
2015-12-02 19:50:03 +03:00
* existing code because it removes the need to manually deal with the
* acquisition context , but it is also brittle because the context is global
* and care must be taken not to nest calls . New code should use the
* drm_modeset_lock_all_ctx ( ) function and pass in the context explicitly .
2014-07-25 19:47:18 +04:00
*/
2015-07-28 14:18:42 +03:00
void drm_modeset_lock_all ( struct drm_device * dev )
2014-07-25 19:47:18 +04:00
{
struct drm_mode_config * config = & dev - > mode_config ;
struct drm_modeset_acquire_ctx * ctx ;
int ret ;
2017-10-31 14:55:35 +03:00
ctx = kzalloc ( sizeof ( * ctx ) , GFP_KERNEL | __GFP_NOFAIL ) ;
2015-07-28 14:18:42 +03:00
if ( WARN_ON ( ! ctx ) )
return ;
mutex_lock ( & config - > mutex ) ;
2014-07-25 19:47:18 +04:00
drm_modeset_acquire_init ( ctx , 0 ) ;
retry :
2015-12-02 19:50:03 +03:00
ret = drm_modeset_lock_all_ctx ( dev , ctx ) ;
if ( ret < 0 ) {
if ( ret = = - EDEADLK ) {
drm_modeset_backoff ( ctx ) ;
goto retry ;
}
drm_modeset_acquire_fini ( ctx ) ;
kfree ( ctx ) ;
return ;
}
2018-02-21 18:23:31 +03:00
ww_acquire_done ( & ctx - > ww_ctx ) ;
2014-07-25 19:47:18 +04:00
WARN_ON ( config - > acquire_ctx ) ;
2015-12-02 19:50:03 +03:00
/*
* We hold the locks now , so it is safe to stash the acquisition
* context for drm_modeset_unlock_all ( ) .
2014-07-25 19:47:18 +04:00
*/
config - > acquire_ctx = ctx ;
drm_warn_on_modeset_not_all_locked ( dev ) ;
}
EXPORT_SYMBOL ( drm_modeset_lock_all ) ;
/**
* drm_modeset_unlock_all - drop all modeset locks
2015-12-02 19:50:03 +03:00
* @ dev : DRM device
2014-07-25 19:47:18 +04:00
*
2015-12-02 19:50:03 +03:00
* This function drops all modeset locks taken by a previous call to the
* drm_modeset_lock_all ( ) function .
*
* This function is deprecated . It uses the lock acquisition context stored
2017-01-25 09:26:45 +03:00
* in & drm_device . mode_config . This facilitates conversion of existing
2015-12-02 19:50:03 +03:00
* code because it removes the need to manually deal with the acquisition
* context , but it is also brittle because the context is global and care must
* be taken not to nest calls . New code should pass the acquisition context
* directly to the drm_modeset_drop_locks ( ) function .
2014-07-25 19:47:18 +04:00
*/
void drm_modeset_unlock_all ( struct drm_device * dev )
{
struct drm_mode_config * config = & dev - > mode_config ;
struct drm_modeset_acquire_ctx * ctx = config - > acquire_ctx ;
if ( WARN_ON ( ! ctx ) )
return ;
config - > acquire_ctx = NULL ;
drm_modeset_drop_locks ( ctx ) ;
drm_modeset_acquire_fini ( ctx ) ;
kfree ( ctx ) ;
mutex_unlock ( & dev - > mode_config . mutex ) ;
}
EXPORT_SYMBOL ( drm_modeset_unlock_all ) ;
/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert. WARNs if any per-CRTC lock, the connection
 * mutex or the overall &drm_mode_config.mutex is not currently held.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	drm_for_each_crtc(crtc, dev)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
2013-11-19 21:10:12 +04:00
/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
 *
 * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
 * all calls to drm_modeset_lock() will perform an interruptible
 * wait.
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags)
{
	/* Start from a fully zeroed context. */
	memset(ctx, 0, sizeof(*ctx));

	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);

	/* Record whether lock waits against this ctx are interruptible. */
	ctx->interruptible = !!(flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
}
EXPORT_SYMBOL(drm_modeset_acquire_init);
/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 *
 * NOTE(review): presumably all locks taken against @ctx must already have
 * been dropped via drm_modeset_drop_locks() before this is called — confirm
 * against the ww_mutex API contract for ww_acquire_fini().
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);
/**
* drm_modeset_drop_locks - drop all locks
* @ ctx : the acquire context
*
* Drop all locks currently held against this acquire context .
*/
void drm_modeset_drop_locks ( struct drm_modeset_acquire_ctx * ctx )
{
WARN_ON ( ctx - > contended ) ;
while ( ! list_empty ( & ctx - > locked ) ) {
struct drm_modeset_lock * lock ;
lock = list_first_entry ( & ctx - > locked ,
struct drm_modeset_lock , head ) ;
drm_modeset_unlock ( lock ) ;
}
}
EXPORT_SYMBOL ( drm_modeset_drop_locks ) ;
/*
 * Low-level helper backing drm_modeset_lock() and drm_modeset_backoff().
 *
 * @interruptible: use the _interruptible ww_mutex lock variants, which can
 *	return -ERESTARTSYS.
 * @slow: use the lock_slow ww_mutex variants, used from the backoff path
 *	to relock the lock that previously caused -EDEADLK.
 *
 * On success the lock is added to @ctx's tracking list so that
 * drm_modeset_drop_locks() can release it later. On -EDEADLK the lock is
 * remembered in @ctx->contended for drm_modeset_backoff().
 */
static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	/* A previously contended lock must be backed off first. */
	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		/* Track the lock so drop_locks can find it; head must be
		 * empty, i.e. the lock not already tracked elsewhere. */
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		/* Remember the contended lock for the backoff slow path. */
		ctx->contended = lock;
	}

	return ret;
}
2017-09-12 16:37:44 +03:00
/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 *
 * This function returns 0 on success, or -ERESTARTSYS if this context
 * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
 * wait has been interrupted.
 */
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_modeset_lock *contended = ctx->contended;

	/* Clear before relocking: modeset_lock() WARNs on a stale value. */
	ctx->contended = NULL;

	if (WARN_ON(!contended))
		return 0;

	/* Drop everything, then block on the contended lock (slow path). */
	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);
2016-11-15 01:40:57 +03:00
/**
 * drm_modeset_lock_init - initialize lock
 * @lock: lock to init
 */
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
	/* All modeset locks share crtc_ww_class so one acquire context can
	 * order any mix of them. */
	ww_mutex_init(&lock->mutex, &crtc_ww_class);
	/* An empty head means "not tracked by any acquire context". */
	INIT_LIST_HEAD(&lock->head);
}
EXPORT_SYMBOL(drm_modeset_lock_init);
2013-11-19 21:10:12 +04:00
/**
* drm_modeset_lock - take modeset lock
* @ lock : lock to take
* @ ctx : acquire ctx
*
2017-07-20 19:07:48 +03:00
* If @ ctx is not NULL , then its ww acquire context is used and the
2013-11-19 21:10:12 +04:00
* lock will be tracked by the context and can be released by calling
* drm_modeset_drop_locks ( ) . If - EDEADLK is returned , this means a
* deadlock scenario has been detected and it is an error to attempt
* to take any more locks without first calling drm_modeset_backoff ( ) .
2017-07-20 19:07:48 +03:00
*
2017-09-12 16:37:44 +03:00
* If the @ ctx is not NULL and initialized with
* % DRM_MODESET_ACQUIRE_INTERRUPTIBLE , this function will fail with
* - ERESTARTSYS when interrupted .
*
2017-07-20 19:07:48 +03:00
* If @ ctx is NULL then the function call behaves like a normal ,
2017-09-12 16:37:44 +03:00
* uninterruptible non - nesting mutex_lock ( ) call .
2013-11-19 21:10:12 +04:00
*/
int drm_modeset_lock ( struct drm_modeset_lock * lock ,
struct drm_modeset_acquire_ctx * ctx )
{
if ( ctx )
2017-09-12 16:37:44 +03:00
return modeset_lock ( lock , ctx , ctx - > interruptible , false ) ;
2013-11-19 21:10:12 +04:00
ww_mutex_lock ( & lock - > mutex , NULL ) ;
return 0 ;
}
EXPORT_SYMBOL ( drm_modeset_lock ) ;
/**
 * drm_modeset_lock_single_interruptible - take a single modeset lock
 * @lock: lock to take
 *
 * This function behaves as drm_modeset_lock() with a NULL context,
 * but performs interruptible waits.
 *
 * This function returns 0 on success, or -ERESTARTSYS when interrupted.
 */
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
	/* NULL ww ctx: the lock is not tracked and cannot deadlock-backoff. */
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
2013-11-19 21:10:12 +04:00
/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	/* del_init (not plain del) so the head reads as empty again whether
	 * or not the lock was tracked by an acquire context. */
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);
2015-12-02 19:50:03 +03:00
/**
 * drm_modeset_lock_all_ctx - take all modeset locks
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented.
 *
 * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
 * since that lock isn't required for modeset state changes. Callers which
 * need to grab that lock too need to do so outside of the acquire context
 * @ctx.
 *
 * Locks acquired with this function should be released by calling the
 * drm_modeset_drop_locks() function on @ctx.
 *
 * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int drm_modeset_lock_all_ctx(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_private_obj *privobj;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret;

	/*
	 * On failure (including -EDEADLK) the locks taken so far stay
	 * tracked in @ctx; the caller backs off or drops them, so there is
	 * deliberately no rollback here.
	 */
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_plane(plane, dev) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_privobj(privobj, dev) {
		ret = drm_modeset_lock(&privobj->lock, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);