// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

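/*
 * Per-file driver state, tracking the channel contexts opened by this file.
 * The IDR maps the context IDs handed out to userspace back to contexts and
 * is protected by @lock.
 */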
struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

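/*
 * Run the core atomic checks, then validate the global display hub state on
 * SoCs that have a hub.
 */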
static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

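/*
 * Custom commit tail: when a display hub is present, the hub state must be
 * committed after the modeset disables but before any planes are committed,
 * so the default helper ordering cannot be used.
 */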
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init(&fpriv->contexts);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}

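/*
 * Copy a single relocation descriptor from userspace and resolve its buffer
 * handles into host1x_bo references. Returns -ENOENT if either handle does
 * not name a valid GEM object.
 */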
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

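/*
 * Validate a userspace submission and turn it into a host1x job: copy in the
 * command buffers and relocations, sanity-check offsets and sizes so that a
 * malformed stream cannot corrupt the gather, then pin and submit the job on
 * the context's channel.
 */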
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We don't yet support other than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383; a
		 * higher value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned; an
		 * unaligned offset is malformed and would corrupt the command
		 * stream when the buffer address is relocated.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset would cause an unaligned write
		 * while patching the relocations, corrupting the command
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	while (num_refs--)
		drm_gem_object_put_unlocked(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}

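/*
 * The staging ioctls below implement the experimental userspace ABI: GEM
 * allocation and mapping, raw syncpoint access, channel management, job
 * submission and per-BO tiling/flag queries.
 */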
#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

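/*
 * Open a channel on @client and register the new context in the file's IDR;
 * the allocated ID is what userspace passes back in subsequent ioctls.
 */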
static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put_unlocked(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

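/* idr_for_each() callback used to release all contexts on file close */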
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	client->drm = tegra;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	client->drm = NULL;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

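/*
 * Attach a host1x client to the shared Tegra DRM IOMMU domain, unless the
 * client is already attached to a (DMA API managed) domain of its own, in
 * which case it is left untouched.
 */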
int host1x_client_iommu_attach(struct host1x_client *client)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
	struct drm_device *drm = dev_get_drvdata(client->parent);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_group *group = NULL;
	int err;

	/*
	 * If the host1x client is already attached to an IOMMU domain that is
	 * not the shared IOMMU domain, don't try to attach it to a different
	 * domain. This allows using the IOMMU-backed DMA API.
	 */
	if (domain && domain != tegra->domain)
		return 0;

	if (tegra->domain) {
		group = iommu_group_get(client->dev);
		if (!group) {
			dev_err(client->dev, "failed to get IOMMU group\n");
			return -ENODEV;
		}

		if (domain != tegra->domain) {
			err = iommu_attach_group(tegra->domain, group);
			if (err < 0) {
				iommu_group_put(group);
				return err;
			}
		}

		tegra->use_explicit_iommu = true;
	}

	client->group = group;

	return 0;
}

void host1x_client_iommu_detach(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->parent);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_domain *domain;

	if (client->group) {
		/*
		 * Devices that are part of the same group may no longer be
		 * attached to a domain at this point because their group may
		 * have been detached by an earlier client.
		 */
		domain = iommu_get_domain_for_dev(client->dev);
		if (domain)
			iommu_detach_group(tegra->domain, client->group);

		iommu_group_put(client->group);
		client->group = NULL;
	}
}

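/*
 * Allocate @size bytes of zeroed kernel memory for use by the hardware.
 * Without an IOMMU the pages are addressed physically; otherwise an IOVA
 * range is allocated from the carveout and mapped to the pages. The bus
 * address is returned in @dma.
 */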
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct iommu_domain *domain;
	struct tegra_drm *tegra;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra) {
		err = -ENOMEM;
		goto put;
	}

	/*
	 * If the Tegra DRM clients are backed by an IOMMU, push buffers are
	 * likely to be allocated beyond the 32-bit boundary if sufficient
	 * system memory is available. This is problematic on earlier Tegra
	 * generations where host1x supports a maximum of 32 address bits in
	 * the GATHER opcode. In this case, unless host1x is behind an IOMMU
	 * as well it won't be able to process buffers allocated beyond the
	 * 32-bit boundary.
	 *
	 * The DMA API will use bounce buffers in this case, so that could
	 * perhaps still be made to work, even if less efficient, but there
	 * is another catch: in order to perform cache maintenance on pages
	 * allocated for discontiguous buffers we need to map and unmap the
	 * SG table representing these buffers. This is fine for something
	 * small like a push buffer, but it exhausts the bounce buffer pool
	 * (typically on the order of a few MiB) for framebuffers (many MiB
	 * for any modern resolution).
	 *
	 * Work around this by making sure that Tegra DRM clients only use
	 * an IOMMU if the parent host1x also uses an IOMMU.
	 *
	 * Note that there's still a small gap here that we don't cover: if
	 * the DMA API is backed by an IOMMU there's no way to control which
	 * device is attached to an IOMMU and which isn't, except via wiring
	 * up the device tree appropriately. This is considered a problem of
	 * integration, so care must be taken for the DT to be consistent.
	 */
	domain = iommu_get_domain_for_dev(drm->dev->parent);
	if (domain && iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	dev_set_drvdata(&dev->dev, drm);
	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.normalize_zpos = true;

	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(dev);
	if (err < 0)
		goto fbdev;

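	/*
	 * With an explicit IOMMU domain, split its aperture into a GEM
	 * region for buffer objects and a CARVEOUT_SZ region at the top
	 * for tegra_drm_alloc() allocations.
	 */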
	if (tegra->use_explicit_iommu) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		u64 dma_mask = dma_get_mask(&dev->dev);
		dma_addr_t start, end;
		unsigned long order;

		start = tegra->domain->geometry.aperture_start & dma_mask;
		end = tegra->domain->geometry.aperture_end & dma_mask;

		gem_start = start;
		gem_end = end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG_DRIVER("IOMMU apertures:\n");
		DRM_DEBUG_DRIVER("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG_DRIVER("  Carveout: %#llx-%#llx\n", carveout_start,
				 carveout_end);
	} else if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		tegra->domain = NULL;
		iova_cache_put();
	}

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
							    false);
	if (err < 0)
		goto hub;

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto fb;

	return 0;

fb:
	tegra_drm_fb_exit(drm);
hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
	}

	host1x_device_exit(dev);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
put:
	drm_dev_put(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_dev_unregister(drm);

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	err = host1x_device_exit(dev);
	if (err < 0)
		dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
	drm_dev_put(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ .compatible = "nvidia,tegra194-display", },
	{ .compatible = "nvidia,tegra194-dc", },
	{ .compatible = "nvidia,tegra194-sor", },
	{ .compatible = "nvidia,tegra194-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver *const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");