// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>

#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
/*
 * Experimental (perfcnt) ioctls are disabled unless this unsafe module
 * parameter is set at runtime; module_param_unsafe() taints the kernel
 * when it is flipped on.
 */
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
/*
 * GET_PARAM ioctl: report one GPU feature/ID register value to userspace.
 * param->param selects the value, param->value receives it.
 * Returns -EINVAL for a non-zero pad field or an unknown parameter.
 */
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct drm_panfrost_get_param *param = data;
	struct panfrost_device *pfdev = ddev->dev_private;

	if (param->pad != 0)
		return -EINVAL;

	/* Expands to a case label mapping a param ID onto a features field. */
#define PANFROST_FEATURE(name, member)			\
	case DRM_PANFROST_PARAM_ ## name:		\
		param->value = pfdev->features.member;	\
		break

	/*
	 * Same, for an array-valued feature: param IDs <name>0 .. <name>max
	 * index into the member[] array.
	 */
#define PANFROST_FEATURE_ARRAY(name, member, max)			\
	case DRM_PANFROST_PARAM_ ## name ## 0 ...			\
	DRM_PANFROST_PARAM_ ## name ## max:				\
		param->value = pfdev->features.member[param->param -	\
					DRM_PANFROST_PARAM_ ## name ## 0]; \
		break

	switch (param->param) {
		PANFROST_FEATURE(GPU_PROD_ID, id);
		PANFROST_FEATURE(GPU_REVISION, revision);
		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
		PANFROST_FEATURE(L2_PRESENT, l2_present);
		PANFROST_FEATURE(STACK_PRESENT, stack_present);
		PANFROST_FEATURE(AS_PRESENT, as_present);
		PANFROST_FEATURE(JS_PRESENT, js_present);
		PANFROST_FEATURE(L2_FEATURES, l2_features);
		PANFROST_FEATURE(CORE_FEATURES, core_features);
		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
		PANFROST_FEATURE(MEM_FEATURES, mem_features);
		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
		PANFROST_FEATURE(MAX_THREADS, max_threads);
		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
				thread_max_workgroup_sz);
		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
				thread_max_barrier_sz);
		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);

	default:
		return -EINVAL;
	}

	return 0;
}
static int panfrost_ioctl_create_bo ( struct drm_device * dev , void * data ,
struct drm_file * file )
{
2020-01-15 20:15:54 -06:00
struct panfrost_file_priv * priv = file - > driver_priv ;
2019-07-11 15:56:14 -06:00
struct panfrost_gem_object * bo ;
2018-09-10 14:27:58 -05:00
struct drm_panfrost_create_bo * args = data ;
2020-01-15 20:15:54 -06:00
struct panfrost_gem_mapping * mapping ;
2018-09-10 14:27:58 -05:00
2019-07-11 15:56:14 -06:00
if ( ! args - > size | | args - > pad | |
2019-07-26 16:09:43 -06:00
( args - > flags & ~ ( PANFROST_BO_NOEXEC | PANFROST_BO_HEAP ) ) )
return - EINVAL ;
/* Heaps should never be executable */
if ( ( args - > flags & PANFROST_BO_HEAP ) & &
! ( args - > flags & PANFROST_BO_NOEXEC ) )
2018-09-10 14:27:58 -05:00
return - EINVAL ;
2019-07-11 15:56:14 -06:00
bo = panfrost_gem_create_with_handle ( file , dev , args - > size , args - > flags ,
& args - > handle ) ;
if ( IS_ERR ( bo ) )
return PTR_ERR ( bo ) ;
2018-09-10 14:27:58 -05:00
2020-01-15 20:15:54 -06:00
mapping = panfrost_gem_mapping_get ( bo , priv ) ;
if ( ! mapping ) {
drm_gem_object_put_unlocked ( & bo - > base . base ) ;
return - EINVAL ;
}
args - > offset = mapping - > mmnode . start < < PAGE_SHIFT ;
panfrost_gem_mapping_put ( mapping ) ;
2018-09-10 14:27:58 -05:00
return 0 ;
}
/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
		    struct drm_file *file_priv,
		    struct drm_panfrost_submit *args,
		    struct panfrost_job *job)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo;
	unsigned int i;
	int ret;

	job->bo_count = args->bo_handle_count;

	/* No BOs referenced: nothing to resolve. */
	if (!job->bo_count)
		return 0;

	/* One implicit-fence slot per BO, zero-initialized. */
	job->implicit_fences = kvmalloc_array(job->bo_count,
					      sizeof(struct dma_fence *),
					      GFP_KERNEL | __GFP_ZERO);
	if (!job->implicit_fences)
		return -ENOMEM;

	/* Resolves userspace handles and takes a ref on each BO. */
	ret = drm_gem_objects_lookup(file_priv,
				     (void __user *)(uintptr_t)args->bo_handles,
				     job->bo_count, &job->bos);
	if (ret)
		return ret;

	job->mappings = kvmalloc_array(job->bo_count,
				       sizeof(struct panfrost_gem_mapping *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!job->mappings)
		return -ENOMEM;

	for (i = 0; i < job->bo_count; i++) {
		struct panfrost_gem_mapping *mapping;

		bo = to_panfrost_bo(job->bos[i]);
		/* Grab a ref on this FD's GPU VA mapping of the BO. */
		mapping = panfrost_gem_mapping_get(bo, priv);
		if (!mapping) {
			/* Partial state is unwound by panfrost_job_cleanup(). */
			ret = -EINVAL;
			break;
		}

		/* Mark the BO as in use by the GPU (blocks madvise purge). */
		atomic_inc(&bo->gpu_usecount);
		job->mappings[i] = mapping;
	}

	return ret;
}
/**
* panfrost_copy_in_sync ( ) - Sets up job - > in_fences [ ] with the sync objects
* referenced by the job .
* @ dev : DRM device
* @ file_priv : DRM file for this fd
* @ args : IOCTL args
* @ job : job being set up
*
* Resolve syncobjs from userspace to fences and attach them to job .
*
* Note that this function doesn ' t need to unreference the fences on
* failure , because that will happen at panfrost_job_cleanup ( ) time .
*/
static int
panfrost_copy_in_sync ( struct drm_device * dev ,
struct drm_file * file_priv ,
struct drm_panfrost_submit * args ,
struct panfrost_job * job )
{
u32 * handles ;
int ret = 0 ;
int i ;
job - > in_fence_count = args - > in_sync_count ;
if ( ! job - > in_fence_count )
return 0 ;
job - > in_fences = kvmalloc_array ( job - > in_fence_count ,
sizeof ( struct dma_fence * ) ,
GFP_KERNEL | __GFP_ZERO ) ;
if ( ! job - > in_fences ) {
DRM_DEBUG ( " Failed to allocate job in fences \n " ) ;
return - ENOMEM ;
}
handles = kvmalloc_array ( job - > in_fence_count , sizeof ( u32 ) , GFP_KERNEL ) ;
if ( ! handles ) {
ret = - ENOMEM ;
DRM_DEBUG ( " Failed to allocate incoming syncobj handles \n " ) ;
goto fail ;
}
if ( copy_from_user ( handles ,
( void __user * ) ( uintptr_t ) args - > in_syncs ,
job - > in_fence_count * sizeof ( u32 ) ) ) {
ret = - EFAULT ;
DRM_DEBUG ( " Failed to copy in syncobj handles \n " ) ;
goto fail ;
}
for ( i = 0 ; i < job - > in_fence_count ; i + + ) {
ret = drm_syncobj_find_fence ( file_priv , handles [ i ] , 0 , 0 ,
& job - > in_fences [ i ] ) ;
if ( ret = = - EINVAL )
goto fail ;
}
fail :
kvfree ( handles ) ;
return ret ;
}
static int panfrost_ioctl_submit ( struct drm_device * dev , void * data ,
struct drm_file * file )
{
struct panfrost_device * pfdev = dev - > dev_private ;
struct drm_panfrost_submit * args = data ;
2019-04-24 15:13:53 +02:00
struct drm_syncobj * sync_out = NULL ;
2018-09-10 14:27:58 -05:00
struct panfrost_job * job ;
int ret = 0 ;
2019-04-24 15:13:53 +02:00
if ( ! args - > jc )
return - EINVAL ;
if ( args - > requirements & & args - > requirements ! = PANFROST_JD_REQ_FS )
return - EINVAL ;
if ( args - > out_sync > 0 ) {
sync_out = drm_syncobj_find ( file , args - > out_sync ) ;
if ( ! sync_out )
return - ENODEV ;
}
2018-09-10 14:27:58 -05:00
job = kzalloc ( sizeof ( * job ) , GFP_KERNEL ) ;
2019-04-24 15:13:53 +02:00
if ( ! job ) {
ret = - ENOMEM ;
goto fail_out_sync ;
}
2018-09-10 14:27:58 -05:00
kref_init ( & job - > refcount ) ;
job - > pfdev = pfdev ;
job - > jc = args - > jc ;
job - > requirements = args - > requirements ;
job - > flush_id = panfrost_gpu_get_latest_flush_id ( pfdev ) ;
job - > file_priv = file - > driver_priv ;
ret = panfrost_copy_in_sync ( dev , file , args , job ) ;
if ( ret )
2019-04-24 15:13:53 +02:00
goto fail_job ;
2018-09-10 14:27:58 -05:00
ret = panfrost_lookup_bos ( dev , file , args , job ) ;
if ( ret )
2019-04-24 15:13:53 +02:00
goto fail_job ;
2018-09-10 14:27:58 -05:00
ret = panfrost_job_push ( job ) ;
if ( ret )
2019-04-24 15:13:53 +02:00
goto fail_job ;
2018-09-10 14:27:58 -05:00
/* Update the return sync object for the job */
2019-04-24 15:13:53 +02:00
if ( sync_out )
2018-09-10 14:27:58 -05:00
drm_syncobj_replace_fence ( sync_out , job - > render_done_fence ) ;
2019-04-24 15:13:53 +02:00
fail_job :
2018-09-10 14:27:58 -05:00
panfrost_job_put ( job ) ;
2019-04-24 15:13:53 +02:00
fail_out_sync :
2019-05-09 10:21:51 +02:00
if ( sync_out )
drm_syncobj_put ( sync_out ) ;
2018-09-10 14:27:58 -05:00
return ret ;
}
static int
panfrost_ioctl_wait_bo ( struct drm_device * dev , void * data ,
struct drm_file * file_priv )
{
long ret ;
struct drm_panfrost_wait_bo * args = data ;
struct drm_gem_object * gem_obj ;
unsigned long timeout = drm_timeout_abs_to_jiffies ( args - > timeout_ns ) ;
if ( args - > pad )
return - EINVAL ;
gem_obj = drm_gem_object_lookup ( file_priv , args - > handle ) ;
if ( ! gem_obj )
return - ENOENT ;
2019-08-11 10:06:32 +02:00
ret = dma_resv_wait_timeout_rcu ( gem_obj - > resv , true ,
2018-09-10 14:27:58 -05:00
true , timeout ) ;
if ( ! ret )
ret = timeout ? - ETIMEDOUT : - EBUSY ;
drm_gem_object_put_unlocked ( gem_obj ) ;
return ret ;
}
static int panfrost_ioctl_mmap_bo ( struct drm_device * dev , void * data ,
struct drm_file * file_priv )
{
struct drm_panfrost_mmap_bo * args = data ;
2019-08-07 10:52:48 -04:00
struct drm_gem_object * gem_obj ;
int ret ;
2018-09-10 14:27:58 -05:00
if ( args - > flags ! = 0 ) {
DRM_INFO ( " unknown mmap_bo flags: %d \n " , args - > flags ) ;
return - EINVAL ;
}
2019-08-07 10:52:48 -04:00
gem_obj = drm_gem_object_lookup ( file_priv , args - > handle ) ;
if ( ! gem_obj ) {
DRM_DEBUG ( " Failed to look up GEM BO %d \n " , args - > handle ) ;
return - ENOENT ;
}
2019-07-26 16:09:43 -06:00
/* Don't allow mmapping of heap objects as pages are not pinned. */
2019-11-29 14:59:03 +01:00
if ( to_panfrost_bo ( gem_obj ) - > is_heap ) {
ret = - EINVAL ;
goto out ;
}
2019-07-26 16:09:43 -06:00
2019-08-07 10:52:48 -04:00
ret = drm_gem_create_mmap_offset ( gem_obj ) ;
if ( ret = = 0 )
args - > offset = drm_vma_node_offset_addr ( & gem_obj - > vma_node ) ;
2019-11-29 14:59:03 +01:00
out :
drm_gem_object_put_unlocked ( gem_obj ) ;
2019-08-07 10:52:48 -04:00
return ret ;
2018-09-10 14:27:58 -05:00
}
static int panfrost_ioctl_get_bo_offset ( struct drm_device * dev , void * data ,
struct drm_file * file_priv )
{
2020-01-15 20:15:54 -06:00
struct panfrost_file_priv * priv = file_priv - > driver_priv ;
2018-09-10 14:27:58 -05:00
struct drm_panfrost_get_bo_offset * args = data ;
2020-01-15 20:15:54 -06:00
struct panfrost_gem_mapping * mapping ;
2018-09-10 14:27:58 -05:00
struct drm_gem_object * gem_obj ;
struct panfrost_gem_object * bo ;
gem_obj = drm_gem_object_lookup ( file_priv , args - > handle ) ;
if ( ! gem_obj ) {
DRM_DEBUG ( " Failed to look up GEM BO %d \n " , args - > handle ) ;
return - ENOENT ;
}
bo = to_panfrost_bo ( gem_obj ) ;
2020-01-15 20:15:54 -06:00
mapping = panfrost_gem_mapping_get ( bo , priv ) ;
2018-09-10 14:27:58 -05:00
drm_gem_object_put_unlocked ( gem_obj ) ;
2020-01-15 20:15:54 -06:00
if ( ! mapping )
return - EINVAL ;
args - > offset = mapping - > mmnode . start < < PAGE_SHIFT ;
panfrost_gem_mapping_put ( mapping ) ;
2018-09-10 14:27:58 -05:00
return 0 ;
}
/*
 * MADVISE ioctl: mark a BO purgeable (DONTNEED) or needed (WILLNEED) so
 * the shrinker may (or may no longer) reclaim its pages under memory
 * pressure. args->retained reports whether the pages are still present.
 */
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_madvise *args = data;
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;
	int ret = 0;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_panfrost_bo(gem_obj);

	/*
	 * Lock order: shrinker_lock (guards pfdev->shrinker_list) before
	 * mappings.lock (guards bo->mappings.list).
	 */
	mutex_lock(&pfdev->shrinker_lock);
	mutex_lock(&bo->mappings.lock);
	if (args->madv == PANFROST_MADV_DONTNEED) {
		struct panfrost_gem_mapping *first;

		first = list_first_entry(&bo->mappings.list,
					 struct panfrost_gem_mapping,
					 node);

		/*
		 * If we want to mark the BO purgeable, there must be only one
		 * user: the caller FD.
		 * We could do something smarter and mark the BO purgeable only
		 * when all its users have marked it purgeable, but globally
		 * visible/shared BOs are likely to never be marked purgeable
		 * anyway, so let's not bother.
		 */
		if (!list_is_singular(&bo->mappings.list) ||
		    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
			ret = -EINVAL;
			goto out_unlock_mappings;
		}
	}

	/* retained == 0 means the backing pages were already purged. */
	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);

	if (args->retained) {
		/* Move the BO on/off the shrinker's candidate list. */
		if (args->madv == PANFROST_MADV_DONTNEED)
			list_add_tail(&bo->base.madv_list,
				      &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(&bo->base.madv_list);
	}

out_unlock_mappings:
	mutex_unlock(&bo->mappings.lock);
	mutex_unlock(&pfdev->shrinker_lock);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}
2019-06-18 10:16:46 +02:00
int panfrost_unstable_ioctl_check ( void )
{
if ( ! unstable_ioctls )
return - ENOSYS ;
return 0 ;
}
/* GPU VA space constants expressed in pages rather than bytes. */
#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)

/*
 * drm_mm color_adjust callback: shrink the candidate [start, end] page
 * range for executable BOs so the allocation never touches a 4GB
 * boundary. The "color" is the BO's flags (see PANFROST_BO_NOEXEC).
 */
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		/*
		 * If the start is within 16MB of the next 4GB boundary,
		 * skip past that boundary entirely, then clamp the end so
		 * the range stays inside a single 4GB segment.
		 * NOTE(review): presumably works around a hardware
		 * restriction on executable ranges near 4GB boundaries —
		 * confirm against the Midgard docs/errata.
		 */
		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}
2018-09-10 14:27:58 -05:00
static int
panfrost_open ( struct drm_device * dev , struct drm_file * file )
{
2019-08-13 09:01:15 -06:00
int ret ;
2018-09-10 14:27:58 -05:00
struct panfrost_device * pfdev = dev - > dev_private ;
struct panfrost_file_priv * panfrost_priv ;
panfrost_priv = kzalloc ( sizeof ( * panfrost_priv ) , GFP_KERNEL ) ;
if ( ! panfrost_priv )
return - ENOMEM ;
panfrost_priv - > pfdev = pfdev ;
file - > driver_priv = panfrost_priv ;
2019-08-13 09:01:15 -06:00
spin_lock_init ( & panfrost_priv - > mm_lock ) ;
/* 4G enough for now. can be 48-bit */
drm_mm_init ( & panfrost_priv - > mm , SZ_32M > > PAGE_SHIFT , ( SZ_4G - SZ_32M ) > > PAGE_SHIFT ) ;
panfrost_priv - > mm . color_adjust = panfrost_drm_mm_color_adjust ;
ret = panfrost_mmu_pgtable_alloc ( panfrost_priv ) ;
if ( ret )
goto err_pgtable ;
ret = panfrost_job_open ( panfrost_priv ) ;
if ( ret )
goto err_job ;
return 0 ;
err_job :
panfrost_mmu_pgtable_free ( panfrost_priv ) ;
err_pgtable :
drm_mm_takedown ( & panfrost_priv - > mm ) ;
kfree ( panfrost_priv ) ;
return ret ;
2018-09-10 14:27:58 -05:00
}
static void
panfrost_postclose ( struct drm_device * dev , struct drm_file * file )
{
struct panfrost_file_priv * panfrost_priv = file - > driver_priv ;
2019-11-29 14:59:05 +01:00
panfrost_perfcnt_close ( file ) ;
2018-09-10 14:27:58 -05:00
panfrost_job_close ( panfrost_priv ) ;
2019-08-13 09:01:15 -06:00
panfrost_mmu_pgtable_free ( panfrost_priv ) ;
drm_mm_takedown ( & panfrost_priv - > mm ) ;
2018-09-10 14:27:58 -05:00
kfree ( panfrost_priv ) ;
}
/* Ioctl dispatch table; every entry is accessible from render nodes. */
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

	PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
/* Standard GEM file operations (open/mmap/etc.). */
DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 */
static struct drm_driver panfrost_drm_driver = {
	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
	.open			= panfrost_open,
	.postclose		= panfrost_postclose,
	.ioctls			= panfrost_drm_driver_ioctls,
	.num_ioctls		= ARRAY_SIZE(panfrost_drm_driver_ioctls),
	.fops			= &panfrost_drm_driver_fops,
	.name			= "panfrost",
	.desc			= "panfrost DRM",
	.date			= "20180908",
	.major			= 1,
	.minor			= 1,

	/* PRIME (dma-buf) import/export support. */
	.gem_create_object	= panfrost_gem_create_object,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
	.gem_prime_mmap		= drm_gem_prime_mmap,
};
/*
 * Platform probe: allocate device state, bring up the GPU, devfreq and
 * runtime PM, then register the DRM device. Error paths unwind in
 * reverse order via the err_out* labels.
 */
static int panfrost_probe(struct platform_device *pdev)
{
	struct panfrost_device *pfdev;
	struct drm_device *ddev;
	int err;

	pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
	if (!pfdev)
		return -ENOMEM;

	pfdev->pdev = pdev;
	pfdev->dev = &pdev->dev;

	platform_set_drvdata(pdev, pfdev);

	/* SoC-specific data (supply/pm-domain names) from the OF match. */
	pfdev->comp = of_device_get_match_data(&pdev->dev);
	if (!pfdev->comp)
		return -ENODEV;

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ddev->dev_private = pfdev;
	pfdev->ddev = ddev;

	mutex_init(&pfdev->shrinker_lock);
	INIT_LIST_HEAD(&pfdev->shrinker_list);

	err = panfrost_device_init(pfdev);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Fatal error during GPU init\n");
		goto err_out0;
	}

	err = panfrost_devfreq_init(pfdev);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Fatal error during devfreq init\n");
		goto err_out1;
	}

	pm_runtime_set_active(pfdev->dev);
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_enable(pfdev->dev);
	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
	pm_runtime_use_autosuspend(pfdev->dev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs
	 */
	err = drm_dev_register(ddev, 0);
	if (err < 0)
		goto err_out2;

	panfrost_gem_shrinker_init(ddev);

	return 0;

err_out2:
	pm_runtime_disable(pfdev->dev);
	panfrost_devfreq_fini(pfdev);
err_out1:
	panfrost_device_fini(pfdev);
err_out0:
	drm_dev_put(ddev);
	return err;
}
/*
 * Platform remove: unregister from userspace first, then tear the
 * hardware down while holding a runtime-PM reference.
 */
static int panfrost_remove(struct platform_device *pdev)
{
	struct panfrost_device *pfdev = platform_get_drvdata(pdev);
	struct drm_device *ddev = pfdev->ddev;

	drm_dev_unregister(ddev);
	panfrost_gem_shrinker_cleanup(ddev);

	/* Keep the device powered while shutting the hardware down. */
	pm_runtime_get_sync(pfdev->dev);
	panfrost_devfreq_fini(pfdev);
	panfrost_device_fini(pfdev);
	pm_runtime_put_sync_suspend(pfdev->dev);
	pm_runtime_disable(pfdev->dev);

	drm_dev_put(ddev);
	return 0;
}
/* Default per-SoC data: a single "mali" supply and one power domain. */
static const char * const default_supplies[] = { "mali" };

static const struct panfrost_compatible default_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.num_pm_domains = 1, /* optional */
	.pm_domain_names = NULL,
};
/* Supported Midgard GPUs; all currently share the default SoC data. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "arm,mali-t604", .data = &default_data, },
	{ .compatible = "arm,mali-t624", .data = &default_data, },
	{ .compatible = "arm,mali-t628", .data = &default_data, },
	{ .compatible = "arm,mali-t720", .data = &default_data, },
	{ .compatible = "arm,mali-t760", .data = &default_data, },
	{ .compatible = "arm,mali-t820", .data = &default_data, },
	{ .compatible = "arm,mali-t830", .data = &default_data, },
	{ .compatible = "arm,mali-t860", .data = &default_data, },
	{ .compatible = "arm,mali-t880", .data = &default_data, },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
/* System sleep is implemented on top of the runtime-PM callbacks. */
static const struct dev_pm_ops panfrost_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};
static struct platform_driver panfrost_driver = {
	.probe		= panfrost_probe,
	.remove		= panfrost_remove,
	.driver		= {
		.name	= "panfrost",
		.pm	= &panfrost_pm_ops,
		.of_match_table = dt_match,
	},
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");