/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_cma_helper.h>

#include "vc4_drv.h"
static void vc4_output_poll_changed(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_fbdev_cma_hotplug_event(vc4->fbdev);
}
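
/*
 * Bookkeeping for one in-flight atomic commit: the seqno callback fires
 * once the GPU has finished the rendering that targets the new
 * framebuffers, at which point the commit is completed.
 */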
struct vc4_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct vc4_seqno_cb cb;
};
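
/*
 * Applies an already swapped-in state to the hardware: modeset disables,
 * plane updates and modeset enables, then waits for vblank so scanout is
 * done with the old display lists before they are cleaned up, and finally
 * releases the async_modeset semaphore and frees the commit.
 */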
static void
vc4_atomic_complete_commit(struct vc4_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Make sure that drm_atomic_helper_wait_for_vblanks()
	 * actually waits for vblank.  If we're doing a full atomic
	 * modeset (as opposed to a vc4_update_plane() short circuit),
	 * then we need to wait for scanout to be done with our
	 * display lists before we free it and potentially reallocate
	 * and overwrite the dlist memory with a new modeset.
	 */
	state->legacy_cursor_update = false;

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);

	kfree(c);
}

static void vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
{
	struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);

	vc4_atomic_complete_commit(c);
}

static struct vc4_commit *commit_init(struct drm_atomic_state *state)
{
	struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->dev = state->dev;
	c->state = state;

	return c;
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;
	int i;
	uint64_t wait_seqno = 0;
	struct vc4_commit *c;
	struct drm_plane *plane;
	struct drm_plane_state *new_state;

	c = commit_init(state);
	if (!c)
		return -ENOMEM;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret) {
		kfree(c);
		return ret;
	}

	ret = down_interruptible(&vc4->async_modeset);
	if (ret) {
		kfree(c);
		return ret;
	}

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		kfree(c);
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			kfree(c);
			up(&vc4->async_modeset);
			return ret;
		}
	}
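
	/* Find the latest GPU seqno that renders into any of the new
	 * framebuffers, so the commit below can be deferred until that
	 * rendering has finished.
	 */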
	for_each_plane_in_state(state, plane, new_state, i) {
		if ((plane->state->fb != new_state->fb) && new_state->fb) {
			struct drm_gem_cma_object *cma_bo =
				drm_fb_cma_get_gem_obj(new_state->fb, 0);
			struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);

			wait_seqno = max(bo->seqno, wait_seqno);
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(state, true);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */
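
	/* Hold a reference on the state for the asynchronous completion;
	 * it is dropped by drm_atomic_state_put() at the end of
	 * vc4_atomic_complete_commit().
	 */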
	drm_atomic_state_get(state);
	if (nonblock) {
		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
				   vc4_atomic_complete_commit_seqno_cb);
	} else {
		vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
		vc4_atomic_complete_commit(c);
	}

	return 0;
}
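
/*
 * .fb_create hook: when userspace passes no explicit modifier, the
 * framebuffer inherits the tiling format previously set on the BO with
 * vc4_set_tiling_ioctl(), so such userspace still scans out T-tiled
 * buffers correctly.
 */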
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_ERROR("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_unreference_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_fb_cma_create(dev, file_priv, mode_cmd);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.output_poll_changed = vc4_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};
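
/*
 * One-time KMS initialization: vblank support, mode_config limits and
 * hooks, optional fbdev emulation, and connector hotplug polling.
 */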
int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_connector) {
		vc4->fbdev = drm_fbdev_cma_init(dev, 32,
						dev->mode_config.num_connector);
		if (IS_ERR(vc4->fbdev))
			vc4->fbdev = NULL;
	}

	drm_kms_helper_poll_init(dev);

	return 0;
}