// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = dev->dev_private;
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
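/*
 * For example, the S31.32 value 0x0000000080000000 (0.5) has no integer
 * bits set, so the nine most significant fractional bits (bits 31:23)
 * are kept: (0x80000000 >> 23) & GENMASK(8, 0) = 0x100, i.e. 256/512 =
 * 0.5 in S0.9.
 */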
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
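
/*
 * Program the HVS OLED color-transform registers from the committed CTM
 * state: the three SCALER_OLEDCOEF registers get the 3x3 matrix converted
 * to S0.9, and SCALER_OLEDOFFS selects the FIFO the CTM applies to. A
 * FIFO value of 0 disables the CTM, in which case the coefficients are
 * left untouched.
 */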
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
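
/*
 * Finish a commit: mask the HVS underrun interrupt on every channel
 * touched by this commit, run the standard atomic-helper commit sequence
 * (including programming the CTM), and release the async_modeset
 * semaphore once the flip is done and the old state is cleaned up.
 */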
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc;
	int i;

	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
			continue;

		vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
		vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
	}

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits queue the remainder of the commit
 * to a worker instead of completing it inline.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

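	/*
	 * Async (e.g. cursor) updates take a fast path: they are still
	 * serialized against other commits by the async_modeset semaphore,
	 * but are applied immediately with drm_atomic_helper_async_commit()
	 * instead of going through the full commit machinery below.
	 */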
	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

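	/*
	 * Blocking commits wait for the fences here, before the state is
	 * swapped; nonblocking commits wait for them later, in
	 * vc4_atomic_complete_commit(), from the commit worker.
	 */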
	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
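			/* In S31.32, 1.0 is BIT_ULL(32); clearing the sign
			 * bit (bit 63) and rejecting any magnitude above
			 * BIT_ULL(32) is what enforces that limit below.
			 */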
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
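
	/*
	 * The load tracker state carries the aggregate load of all enabled
	 * planes: for every plane in this commit, subtract the contribution
	 * of its old state and add the contribution of its new one.
	 */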
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run at 250MHz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};
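
/*
 * Top-level atomic check: validate the CTM constraints first, then run the
 * core drm_atomic_helper_check(), and finally make sure the resulting plane
 * configuration stays within the HVS and memory-bus load budgets.
 */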
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	struct vc4_load_tracker_state *load_state;
	int ret;

	/* Start with the load tracker enabled. Can be disabled through the
	 * debugfs load_tracker file.
	 */
	vc4->load_tracker_enabled = true;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	dev->irq_enabled = true;
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state) {
		drm_atomic_private_obj_fini(&vc4->ctm_manager);
		return -ENOMEM;
	}

	drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
				    &vc4_load_tracker_state_funcs);

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}