/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
#include "msm_fence.h"

struct msm_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct work_struct work;
	uint32_t crtc_mask;
};

static void commit_worker(struct work_struct *work);

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	spin_lock(&priv->pending_crtcs_event.lock);
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		priv->pending_crtcs |= crtc_mask;
	}
	spin_unlock(&priv->pending_crtcs_event.lock);

	return ret;
}

/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}
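
/* Allocate the object which tracks one in-flight atomic commit; it is
 * torn down again by commit_destroy() once the commit has completed.
 */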
static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->dev = state->dev;
	c->state = state;

	INIT_WORK(&c->work, commit_worker);

	return c;
}
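
/* Mark the commit's crtcs as no longer pending (end_atomic()) and free
 * the commit object:
 */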
static void commit_destroy(struct msm_commit *c)
{
	end_atomic(c->dev->dev_private, c->crtc_mask);
	kfree(c);
}
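
/* Block until the kms backend reports that the committed update has
 * latched on each enabled crtc in @old_state:
 */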
static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct msm_drm_private *priv = old_state->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int i;

	for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
		if (!crtc->state->enable)
			continue;

		/* Legacy cursor ioctls are completely unsynced, and userspace
		 * relies on that (by doing tons of cursor updates). */
		if (old_state->legacy_cursor_update)
			continue;

		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
	}
}

/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c, bool async)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
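
	/* wait for any pending rendering fences on the new fb's to signal
	 * before touching the hw:
	 */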
	drm_atomic_helper_wait_for_fences(dev, state, false);

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs.  So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq).  We need some way to poll for pipe shut
	 * down.  Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be a critical path).
	 */
	msm_atomic_wait_for_commit_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_state_put(state);

	commit_destroy(c);
}
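
/* Worker for nonblocking commits, queued on priv->atomic_wq: */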
static void commit_worker(struct work_struct *work)
{
	complete_commit(container_of(work, struct msm_commit, work), true);
}
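
/* Validate an atomic update; the plane checks must run before the
 * modeset checks (see comment below).  This serves as the driver's
 * atomic_check hook.
 */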
int msm_atomic_check(struct drm_device *dev,
		struct drm_atomic_state *state)
{
	int ret;

	/*
	 * msm ->atomic_check can update ->mode_changed for pixel format
	 * changes, hence must be run before we check the modeset changes.
	 */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	return 0;
}

/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check().  This can still fail when e.g. the framebuffer
 * reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool nonblock)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_commit *c;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for_each_crtc_in_state(state, crtc, crtc_state, i)
		c->crtc_mask |= drm_crtc_mask(crtc);

	/*
	 * Figure out what fence to wait for:
	 */
	for_each_plane_in_state(state, plane, plane_state, i) {
		if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
			struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
			struct msm_gem_object *msm_obj = to_msm_bo(obj);
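
			/* stash the bo's exclusive fence (pending rendering)
			 * so that complete_commit() can wait on it:
			 */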
			plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
		}
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		kfree(c);
		goto error;
	}
2014-11-08 13:21:06 -05:00
/*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits . Which means we can commit the new state on
* the software side now .
*/
2016-06-10 00:06:32 +02:00
drm_atomic_helper_swap_state ( state , true ) ;
2014-11-08 13:21:06 -05:00

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update.  Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */
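
	/* complete_commit() drops this reference via drm_atomic_state_put(): */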
	drm_atomic_state_get(state);
	if (nonblock) {
		queue_work(priv->atomic_wq, &c->work);
		return 0;
	}

	complete_commit(c, false);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}