2019-06-03 07:44:50 +02:00
// SPDX-License-Identifier: GPL-2.0-only
2014-11-08 13:21:06 -05:00
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
2018-09-05 15:57:11 +02:00
# include <drm/drm_atomic_uapi.h>
2019-08-04 08:55:51 +02:00
# include <drm/drm_vblank.h>
2018-09-05 15:57:11 +02:00
2019-08-29 09:45:18 -07:00
# include "msm_atomic_trace.h"
2014-11-08 13:21:06 -05:00
# include "msm_drv.h"
2018-04-03 10:42:23 -04:00
# include "msm_gem.h"
2014-11-08 13:21:06 -05:00
# include "msm_kms.h"
2020-08-18 09:31:19 -07:00
/*
* Helpers to control vblanks while we flush . . basically just to ensure
* that vblank accounting is switched on , so we get valid seqn / timestamp
* on pageflip events ( if requested )
*/
static void vblank_get ( struct msm_kms * kms , unsigned crtc_mask )
{
struct drm_crtc * crtc ;
for_each_crtc_mask ( kms - > dev , crtc , crtc_mask ) {
if ( ! crtc - > state - > active )
continue ;
drm_crtc_vblank_get ( crtc ) ;
}
}
static void vblank_put ( struct msm_kms * kms , unsigned crtc_mask )
{
struct drm_crtc * crtc ;
for_each_crtc_mask ( kms - > dev , crtc , crtc_mask ) {
if ( ! crtc - > state - > active )
continue ;
drm_crtc_vblank_put ( crtc ) ;
}
}
2020-10-16 19:40:43 +05:30
static void lock_crtcs ( struct msm_kms * kms , unsigned int crtc_mask )
{
2021-02-11 14:40:58 -08:00
int crtc_index ;
2020-10-16 19:40:43 +05:30
struct drm_crtc * crtc ;
2021-02-11 14:40:58 -08:00
for_each_crtc_mask ( kms - > dev , crtc , crtc_mask ) {
crtc_index = drm_crtc_index ( crtc ) ;
mutex_lock_nested ( & kms - > commit_lock [ crtc_index ] , crtc_index ) ;
}
2020-10-16 19:40:43 +05:30
}
static void unlock_crtcs ( struct msm_kms * kms , unsigned int crtc_mask )
{
struct drm_crtc * crtc ;
2020-10-20 15:26:00 -07:00
for_each_crtc_mask_reverse ( kms - > dev , crtc , crtc_mask )
2020-10-16 19:40:43 +05:30
mutex_unlock ( & kms - > commit_lock [ drm_crtc_index ( crtc ) ] ) ;
}
2019-08-29 09:45:16 -07:00
/*
 * Perform the deferred hardware flush for a single crtc.  The state was
 * already pushed to hardware registers by msm_atomic_commit_tail(); here
 * we flush it and wait for the flush to complete.  Called from the
 * per-crtc pending-timer worker (msm_atomic_pending_work), which is
 * scheduled to wake up shortly before vblank.
 */
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
	unsigned crtc_mask = BIT(crtc_idx);

	trace_msm_atomic_async_commit_start(crtc_mask);

	lock_crtcs(kms, crtc_mask);

	/*
	 * If the pending bit for this crtc is already clear (a later
	 * synchronous commit folded the flush into its own), there is
	 * nothing left to do:
	 */
	if (!(kms->pending_crtc_mask & crtc_mask)) {
		unlock_crtcs(kms, crtc_mask);
		goto out;
	}

	kms->pending_crtc_mask &= ~crtc_mask;

	kms->funcs->enable_commit(kms);

	/* switch on vblank accounting so pageflip events get valid seqn/ts */
	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

out:
	trace_msm_atomic_async_commit_finish(crtc_mask);
}
2020-10-19 14:10:53 -07:00
static void msm_atomic_pending_work ( struct kthread_work * work )
2019-08-29 09:45:16 -07:00
{
struct msm_pending_timer * timer = container_of ( work ,
2021-09-27 16:04:53 -07:00
struct msm_pending_timer , work . work ) ;
2019-08-29 09:45:16 -07:00
msm_atomic_async_commit ( timer - > kms , timer - > crtc_idx ) ;
}
2020-10-19 14:10:53 -07:00
int msm_atomic_init_pending_timer ( struct msm_pending_timer * timer ,
2019-08-29 09:45:16 -07:00
struct msm_kms * kms , int crtc_idx )
{
timer - > kms = kms ;
timer - > crtc_idx = crtc_idx ;
2020-10-19 14:10:53 -07:00
timer - > worker = kthread_create_worker ( 0 , " atomic-worker-%d " , crtc_idx ) ;
if ( IS_ERR ( timer - > worker ) ) {
int ret = PTR_ERR ( timer - > worker ) ;
timer - > worker = NULL ;
return ret ;
}
sched_set_fifo ( timer - > worker - > task ) ;
2021-09-27 16:04:53 -07:00
msm_hrtimer_work_init ( & timer - > work , timer - > worker ,
msm_atomic_pending_work ,
CLOCK_MONOTONIC , HRTIMER_MODE_ABS ) ;
2020-10-19 14:10:53 -07:00
return 0 ;
}
void msm_atomic_destroy_pending_timer ( struct msm_pending_timer * timer )
{
if ( timer - > worker )
kthread_destroy_worker ( timer - > worker ) ;
2019-08-29 09:45:16 -07:00
}
static bool can_do_async ( struct drm_atomic_state * state ,
struct drm_crtc * * async_crtc )
{
struct drm_connector_state * connector_state ;
struct drm_connector * connector ;
struct drm_crtc_state * crtc_state ;
struct drm_crtc * crtc ;
int i , num_crtcs = 0 ;
if ( ! ( state - > legacy_cursor_update | | state - > async_update ) )
return false ;
/* any connector change, means slow path: */
for_each_new_connector_in_state ( state , connector , connector_state , i )
return false ;
for_each_new_crtc_in_state ( state , crtc , crtc_state , i ) {
if ( drm_atomic_crtc_needs_modeset ( crtc_state ) )
return false ;
if ( + + num_crtcs > 1 )
return false ;
* async_crtc = crtc ;
}
return true ;
}
2019-08-29 09:45:12 -07:00
/*
 * Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with for_each_crtc_mask() iterator, to iterate
 * effected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned mask = 0;
	unsigned i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}
2018-02-28 14:19:05 -05:00
/*
 * Tail of the atomic commit: wait for any in-flight (possibly async)
 * flush on the affected crtcs, push the new state down to hardware,
 * then either defer the flush until just before vblank (async path)
 * or flush and wait synchronously.
 */
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *async_crtc = NULL;
	unsigned crtc_mask = get_crtc_mask(state);
	/* async path requires the kms backend to provide vsync_time */
	bool async = kms->funcs->vsync_time &&
			can_do_async(state, &async_crtc);

	trace_msm_atomic_commit_tail_start(async, crtc_mask);

	kms->funcs->enable_commit(kms);

	/*
	 * Ensure any previous (potentially async) commit has
	 * completed:
	 */
	lock_crtcs(kms, crtc_mask);
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	/*
	 * Now that there is no in-progress flush, prepare the
	 * current update:
	 */
	kms->funcs->prepare_commit(kms, state);

	/*
	 * Push atomic updates down to hardware:
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	if (async) {
		struct msm_pending_timer *timer =
			&kms->pending_timers[drm_crtc_index(async_crtc)];

		/* async updates are limited to single-crtc updates: */
		WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

		/*
		 * Start timer if we don't already have an update pending
		 * on this crtc:
		 */
		if (!(kms->pending_crtc_mask & crtc_mask)) {
			ktime_t vsync_time, wakeup_time;

			kms->pending_crtc_mask |= crtc_mask;

			/* wake 1ms before vblank to do the deferred flush */
			vsync_time = kms->funcs->vsync_time(kms, async_crtc);
			wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

			msm_hrtimer_queue_work(&timer->work, wakeup_time,
					HRTIMER_MODE_ABS);
		}

		kms->funcs->disable_commit(kms);
		unlock_crtcs(kms, crtc_mask);
		/*
		 * At this point, from drm core's perspective, we
		 * are done with the atomic update, so we can just
		 * go ahead and signal that it is done:
		 */
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);

		trace_msm_atomic_commit_tail_finish(async, crtc_mask);

		return;
	}

	/*
	 * If there is any async flush pending on updated crtcs, fold
	 * them into the current flush.
	 */
	kms->pending_crtc_mask &= ~crtc_mask;

	/* switch on vblank accounting so pageflip events get valid seqn/ts */
	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	lock_crtcs(kms, crtc_mask);
	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);

	trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}