/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef DRM_MODESET_LOCK_H_
#define DRM_MODESET_LOCK_H_

#include <linux/types.h> /* stackdepot.h is not self-contained */
#include <linux/stackdepot.h>
#include <linux/ww_mutex.h>
struct drm_modeset_lock ;
/**
2014-08-17 01:15:34 +04:00
* struct drm_modeset_acquire_ctx - locking context ( see ww_acquire_ctx )
2013-11-19 21:10:12 +04:00
* @ ww_ctx : base acquire ctx
* @ contended : used internally for - EDEADLK handling
* @ locked : list of held locks
2014-10-27 22:37:37 +03:00
* @ trylock_only : trylock mode used in atomic contexts / panic notifiers
2017-09-12 16:37:44 +03:00
* @ interruptible : whether interruptible locking should be used .
2013-11-19 21:10:12 +04:00
*
* Each thread competing for a set of locks must use one acquire
* ctx . And if any lock fxn returns - EDEADLK , it must backoff and
* retry .
*/
struct drm_modeset_acquire_ctx {
struct ww_acquire_ctx ww_ctx ;
2015-08-21 22:46:14 +03:00
/*
2013-11-19 21:10:12 +04:00
* Contended lock : if a lock is contended you should only call
* drm_modeset_backoff ( ) which drops locks and slow - locks the
* contended lock .
*/
struct drm_modeset_lock * contended ;
2021-10-01 12:14:44 +03:00
/*
* Stack depot for debugging when a contended lock was not backed off
* from .
*/
depot_stack_handle_t stack_depot ;
2015-08-21 22:46:14 +03:00
/*
2013-11-19 21:10:12 +04:00
* list of held locks ( drm_modeset_lock )
*/
struct list_head locked ;
2014-07-27 21:09:33 +04:00
2015-08-21 22:46:14 +03:00
/*
2014-07-27 21:09:33 +04:00
* Trylock mode , use only for panic handlers !
*/
bool trylock_only ;
2017-09-12 16:37:44 +03:00
/* Perform interruptible waits on this context. */
bool interruptible ;
2013-11-19 21:10:12 +04:00
} ;
/**
2014-08-17 01:15:34 +04:00
* struct drm_modeset_lock - used for locking modeset resources .
2013-11-19 21:10:12 +04:00
* @ mutex : resource locking
2019-02-02 04:23:26 +03:00
* @ head : used to hold its place on & drm_atomi_state . locked list when
2013-11-19 21:10:12 +04:00
* part of an atomic update
*
* Used for locking CRTCs and other modeset resources .
*/
struct drm_modeset_lock {
2015-08-21 22:46:14 +03:00
/*
2013-11-19 21:10:12 +04:00
* modeset lock
*/
struct ww_mutex mutex ;
2015-08-21 22:46:14 +03:00
/*
2013-11-19 21:10:12 +04:00
* Resources that are locked as part of an atomic update are added
* to a list ( so we know what to unlock at the end ) .
*/
struct list_head head ;
} ;
2017-09-12 16:37:44 +03:00
# define DRM_MODESET_ACQUIRE_INTERRUPTIBLE BIT(0)
2013-11-19 21:10:12 +04:00
void drm_modeset_acquire_init ( struct drm_modeset_acquire_ctx * ctx ,
uint32_t flags ) ;
void drm_modeset_acquire_fini ( struct drm_modeset_acquire_ctx * ctx ) ;
void drm_modeset_drop_locks ( struct drm_modeset_acquire_ctx * ctx ) ;
2017-09-12 16:37:44 +03:00
int drm_modeset_backoff ( struct drm_modeset_acquire_ctx * ctx ) ;
2013-11-19 21:10:12 +04:00
2016-11-15 01:40:57 +03:00
void drm_modeset_lock_init ( struct drm_modeset_lock * lock ) ;
2013-11-19 21:10:12 +04:00
/**
* drm_modeset_lock_fini - cleanup lock
* @ lock : lock to cleanup
*/
static inline void drm_modeset_lock_fini ( struct drm_modeset_lock * lock )
{
WARN_ON ( ! list_empty ( & lock - > head ) ) ;
}
/**
* drm_modeset_is_locked - equivalent to mutex_is_locked ( )
* @ lock : lock to check
*/
static inline bool drm_modeset_is_locked ( struct drm_modeset_lock * lock )
{
return ww_mutex_is_locked ( & lock - > mutex ) ;
}
2019-07-08 15:53:07 +03:00
/**
* drm_modeset_lock_assert_held - equivalent to lockdep_assert_held ( )
* @ lock : lock to check
*/
static inline void drm_modeset_lock_assert_held ( struct drm_modeset_lock * lock )
{
lockdep_assert_held ( & lock - > mutex . base ) ;
}
2013-11-19 21:10:12 +04:00
int drm_modeset_lock ( struct drm_modeset_lock * lock ,
struct drm_modeset_acquire_ctx * ctx ) ;
2017-09-12 16:37:44 +03:00
int __must_check drm_modeset_lock_single_interruptible ( struct drm_modeset_lock * lock ) ;
2013-11-19 21:10:12 +04:00
void drm_modeset_unlock ( struct drm_modeset_lock * lock ) ;
struct drm_device ;
2014-07-25 20:07:40 +04:00
struct drm_crtc ;
2014-11-11 12:12:00 +03:00
struct drm_plane ;
2014-07-25 19:47:18 +04:00
2021-10-02 18:45:27 +03:00
void drm_modeset_lock_all ( struct drm_device * dev ) ;
void drm_modeset_unlock_all ( struct drm_device * dev ) ;
2014-07-25 19:47:18 +04:00
void drm_warn_on_modeset_not_all_locked ( struct drm_device * dev ) ;
2015-12-02 19:50:03 +03:00
int drm_modeset_lock_all_ctx ( struct drm_device * dev ,
struct drm_modeset_acquire_ctx * ctx ) ;
2013-11-19 21:10:12 +04:00
2018-11-29 18:04:17 +03:00
/**
* DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks
* @ dev : drm device
* @ ctx : local modeset acquire context , will be dereferenced
* @ flags : DRM_MODESET_ACQUIRE_ * flags to pass to drm_modeset_acquire_init ( )
* @ ret : local ret / err / etc variable to track error status
*
* Use these macros to simplify grabbing all modeset locks using a local
* context . This has the advantage of reducing boilerplate , but also properly
* checking return values where appropriate .
*
* Any code run between BEGIN and END will be holding the modeset locks .
*
* This must be paired with DRM_MODESET_LOCK_ALL_END ( ) . We will jump back and
* forth between the labels on deadlock and error conditions .
*
* Drivers can acquire additional modeset locks . If any lock acquisition
* fails , the control flow needs to jump to DRM_MODESET_LOCK_ALL_END ( ) with
* the @ ret parameter containing the return value of drm_modeset_lock ( ) .
*
* Returns :
* The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN ( )
* is 0 , so no error checking is necessary
*/
# define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \
2020-08-14 12:38:42 +03:00
if ( ! drm_drv_uses_atomic_modeset ( dev ) ) \
mutex_lock ( & dev - > mode_config . mutex ) ; \
2018-11-29 18:04:17 +03:00
drm_modeset_acquire_init ( & ctx , flags ) ; \
modeset_lock_retry : \
ret = drm_modeset_lock_all_ctx ( dev , & ctx ) ; \
if ( ret ) \
goto modeset_lock_fail ;
/**
* DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
2020-08-14 12:38:42 +03:00
* @ dev : drm device
2018-11-29 18:04:17 +03:00
* @ ctx : local modeset acquire context , will be dereferenced
* @ ret : local ret / err / etc variable to track error status
*
* The other side of DRM_MODESET_LOCK_ALL_BEGIN ( ) . It will bounce back to BEGIN
* if ret is - EDEADLK .
*
* It ' s important that you use the same ret variable for begin and end so
* deadlock conditions are properly handled .
*
* Returns :
* ret will be untouched unless it is - EDEADLK on entry . That means that if you
* successfully acquire the locks , ret will be whatever your code sets it to . If
* there is a deadlock or other failure with acquire or backoff , ret will be set
* to that failure . In both of these cases the code between BEGIN / END will not
* be run , so the failure will reflect the inability to grab the locks .
*/
2020-08-14 12:38:42 +03:00
# define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \
2018-11-29 18:04:17 +03:00
modeset_lock_fail : \
if ( ret = = - EDEADLK ) { \
ret = drm_modeset_backoff ( & ctx ) ; \
if ( ! ret ) \
goto modeset_lock_retry ; \
} \
drm_modeset_drop_locks ( & ctx ) ; \
2020-08-14 12:38:42 +03:00
drm_modeset_acquire_fini ( & ctx ) ; \
if ( ! drm_drv_uses_atomic_modeset ( dev ) ) \
mutex_unlock ( & dev - > mode_config . mutex ) ;
2018-11-29 18:04:17 +03:00
2013-11-19 21:10:12 +04:00
# endif /* DRM_MODESET_LOCK_H_ */