/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"

struct msm_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;
	uint32_t fence;
	struct msm_fence_cb fence_cb;
	uint32_t crtc_mask;
};
static void fence_cb(struct msm_fence_cb *cb);
/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	spin_lock(&priv->pending_crtcs_event.lock);
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		priv->pending_crtcs |= crtc_mask;
	}
	spin_unlock(&priv->pending_crtcs_event.lock);

	return ret;
}
/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}
static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->dev = state->dev;
	c->state = state;

	/* TODO we might need a way to indicate to run the cb on a
	 * different wq so wait_for_vblanks() doesn't block retiring
	 * bo's..
	 */
	INIT_FENCE_CB(&c->fence_cb, fence_cb);

	return c;
}
static void commit_destroy(struct msm_commit *c)
{
	end_atomic(c->dev->dev_private, c->crtc_mask);
	kfree(c);
}
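
/* Note: commit_destroy() also releases the crtc_mask claimed via
 * start_atomic(), so it must run exactly once per commit that made it
 * past start_atomic(); the error path before that point in
 * msm_atomic_commit() uses plain kfree() instead.
 */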
static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct msm_drm_private *priv = old_state->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ncrtcs = old_state->dev->mode_config.num_crtc;
	int i;

	for (i = 0; i < ncrtcs; i++) {
		crtc = old_state->crtcs[i];

		if (!crtc)
			continue;

		if (!crtc->state->enable)
			continue;

		/* Legacy cursor ioctls are completely unsynced, and userspace
		 * relies on that (by doing tons of cursor updates). */
		if (old_state->legacy_cursor_update)
			continue;

		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
	}
}
/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs.  So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq).  We need some way to poll for pipe shut
	 * down.  Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be the critical path).
	 */

	msm_atomic_wait_for_commit_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_state_free(state);

	commit_destroy(c);
}
static void fence_cb(struct msm_fence_cb *cb)
{
	struct msm_commit *c =
			container_of(cb, struct msm_commit, fence_cb);
	complete_commit(c);
}
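
/* Async flow sketch: for async commits, msm_atomic_commit() queues
 * c->fence_cb via msm_queue_fence_cb() and returns immediately; once the
 * GPU has retired work up to c->fence, the callback finishes the commit:
 *
 *	msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
 *		... gpu retires c->fence ...
 *	fence_cb() -> complete_commit() -> commit_destroy()
 */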
static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
{
	struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
	c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
}
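
/* Note: fences here are monotonically increasing seqnos, so taking the
 * max across all incoming fb's yields a single wait point which, once
 * passed, guarantees every new scanout buffer is ready for READ access.
 */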
int msm_atomic_check(struct drm_device *dev,
		struct drm_atomic_state *state)
{
	int ret;

	/*
	 * msm ->atomic_check can update ->mode_changed for pixel format
	 * changes, hence must be run before we check the modeset changes.
	 */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	return ret;
}
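
/* Illustrative example of why the ordering above matters: a plane's
 * ->atomic_check() may flip crtc_state->mode_changed when the pixel
 * format changes, and drm_atomic_helper_check_modeset() then expands
 * that into a full modeset; it could not do so if it ran first.
 */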
/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @async: asynchronous commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check().  This can still fail when e.g. the framebuffer
 * reservation fails.  When @async is set, the commit completes from a fence
 * callback instead of blocking (see fence_cb() above).
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool async)
{
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	ktime_t timeout;
	struct msm_commit *c;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for:
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];

		if (!plane)
			continue;

		if ((plane->state->fb != new_state->fb) && new_state->fb)
			add_fb(c, new_state->fb);
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		kfree(c);
		goto error;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits.  Which means we can commit the new state
	 * on the software side now.
	 */

	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update.  Which is important since compositors need to figure out
	 * the composition of the next frame right after having submitted the
	 * current layout.
	 */

	if (async) {
		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
		return 0;
	}

	timeout = ktime_add_ms(ktime_get(), 1000);

	/* uninterruptible wait */
	msm_wait_fence(dev, c->fence, &timeout, false);

	complete_commit(c);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}