2018-09-10 14:27:58 -05:00
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
# include <linux/module.h>
# include <linux/of_platform.h>
# include <linux/pagemap.h>
# include <linux/pm_runtime.h>
# include <drm/panfrost_drm.h>
# include <drm/drm_drv.h>
# include <drm/drm_ioctl.h>
# include <drm/drm_syncobj.h>
# include <drm/drm_utils.h>
# include "panfrost_device.h"
# include "panfrost_gem.h"
# include "panfrost_mmu.h"
# include "panfrost_job.h"
# include "panfrost_gpu.h"
2019-06-18 10:16:48 +02:00
# include "panfrost_perfcnt.h"
2018-09-10 14:27:58 -05:00
2019-06-18 10:16:46 +02:00
/* Gate for ioctls whose UAPI is not yet stable (see panfrost_unstable_ioctl_check()). */
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
2018-09-10 14:27:58 -05:00
static int panfrost_ioctl_get_param ( struct drm_device * ddev , void * data , struct drm_file * file )
{
struct drm_panfrost_get_param * param = data ;
struct panfrost_device * pfdev = ddev - > dev_private ;
if ( param - > pad ! = 0 )
return - EINVAL ;
2019-07-24 11:56:26 +01:00
# define PANFROST_FEATURE(name, member) \
case DRM_PANFROST_PARAM_ # # name : \
param - > value = pfdev - > features . member ; \
break
# define PANFROST_FEATURE_ARRAY(name, member, max) \
case DRM_PANFROST_PARAM_ # # name # # 0 . . . \
DRM_PANFROST_PARAM_ # # name # # max : \
param - > value = pfdev - > features . member [ param - > param - \
DRM_PANFROST_PARAM_ # # name # # 0 ] ; \
break
2018-09-10 14:27:58 -05:00
switch ( param - > param ) {
2019-07-24 11:56:26 +01:00
PANFROST_FEATURE ( GPU_PROD_ID , id ) ;
PANFROST_FEATURE ( GPU_REVISION , revision ) ;
PANFROST_FEATURE ( SHADER_PRESENT , shader_present ) ;
PANFROST_FEATURE ( TILER_PRESENT , tiler_present ) ;
PANFROST_FEATURE ( L2_PRESENT , l2_present ) ;
PANFROST_FEATURE ( STACK_PRESENT , stack_present ) ;
PANFROST_FEATURE ( AS_PRESENT , as_present ) ;
PANFROST_FEATURE ( JS_PRESENT , js_present ) ;
PANFROST_FEATURE ( L2_FEATURES , l2_features ) ;
PANFROST_FEATURE ( CORE_FEATURES , core_features ) ;
PANFROST_FEATURE ( TILER_FEATURES , tiler_features ) ;
PANFROST_FEATURE ( MEM_FEATURES , mem_features ) ;
PANFROST_FEATURE ( MMU_FEATURES , mmu_features ) ;
PANFROST_FEATURE ( THREAD_FEATURES , thread_features ) ;
PANFROST_FEATURE ( MAX_THREADS , max_threads ) ;
PANFROST_FEATURE ( THREAD_MAX_WORKGROUP_SZ ,
thread_max_workgroup_sz ) ;
PANFROST_FEATURE ( THREAD_MAX_BARRIER_SZ ,
thread_max_barrier_sz ) ;
PANFROST_FEATURE ( COHERENCY_FEATURES , coherency_features ) ;
2021-06-04 09:00:11 -04:00
PANFROST_FEATURE ( AFBC_FEATURES , afbc_features ) ;
2019-07-24 11:56:26 +01:00
PANFROST_FEATURE_ARRAY ( TEXTURE_FEATURES , texture_features , 3 ) ;
PANFROST_FEATURE_ARRAY ( JS_FEATURES , js_features , 15 ) ;
PANFROST_FEATURE ( NR_CORE_GROUPS , nr_core_groups ) ;
PANFROST_FEATURE ( THREAD_TLS_ALLOC , thread_tls_alloc ) ;
2018-09-10 14:27:58 -05:00
default :
return - EINVAL ;
}
return 0 ;
}
static int panfrost_ioctl_create_bo ( struct drm_device * dev , void * data ,
struct drm_file * file )
{
2020-01-15 20:15:54 -06:00
struct panfrost_file_priv * priv = file - > driver_priv ;
2019-07-11 15:56:14 -06:00
struct panfrost_gem_object * bo ;
2018-09-10 14:27:58 -05:00
struct drm_panfrost_create_bo * args = data ;
2020-01-15 20:15:54 -06:00
struct panfrost_gem_mapping * mapping ;
2022-12-19 14:01:30 +00:00
int ret ;
2018-09-10 14:27:58 -05:00
2019-07-11 15:56:14 -06:00
if ( ! args - > size | | args - > pad | |
2019-07-26 16:09:43 -06:00
( args - > flags & ~ ( PANFROST_BO_NOEXEC | PANFROST_BO_HEAP ) ) )
return - EINVAL ;
/* Heaps should never be executable */
if ( ( args - > flags & PANFROST_BO_HEAP ) & &
! ( args - > flags & PANFROST_BO_NOEXEC ) )
2018-09-10 14:27:58 -05:00
return - EINVAL ;
2022-12-19 14:01:30 +00:00
bo = panfrost_gem_create ( dev , args - > size , args - > flags ) ;
2019-07-11 15:56:14 -06:00
if ( IS_ERR ( bo ) )
return PTR_ERR ( bo ) ;
2018-09-10 14:27:58 -05:00
2022-12-19 14:01:30 +00:00
ret = drm_gem_handle_create ( file , & bo - > base . base , & args - > handle ) ;
if ( ret )
goto out ;
2020-01-15 20:15:54 -06:00
mapping = panfrost_gem_mapping_get ( bo , priv ) ;
2022-12-19 14:01:30 +00:00
if ( mapping ) {
args - > offset = mapping - > mmnode . start < < PAGE_SHIFT ;
panfrost_gem_mapping_put ( mapping ) ;
} else {
/* This can only happen if the handle from
* drm_gem_handle_create ( ) has already been guessed and freed
* by user space
*/
ret = - EINVAL ;
2020-01-15 20:15:54 -06:00
}
2022-12-19 14:01:30 +00:00
out :
drm_gem_object_put ( & bo - > base . base ) ;
return ret ;
2018-09-10 14:27:58 -05:00
}
/**
* panfrost_lookup_bos ( ) - Sets up job - > bo [ ] with the GEM objects
* referenced by the job .
* @ dev : DRM device
* @ file_priv : DRM file for this fd
* @ args : IOCTL args
* @ job : job being set up
*
* Resolve handles from userspace to BOs and attach them to job .
*
* Note that this function doesn ' t need to unreference the BOs on
* failure , because that will happen at panfrost_job_cleanup ( ) time .
*/
static int
panfrost_lookup_bos ( struct drm_device * dev ,
struct drm_file * file_priv ,
struct drm_panfrost_submit * args ,
struct panfrost_job * job )
{
2020-01-15 20:15:54 -06:00
struct panfrost_file_priv * priv = file_priv - > driver_priv ;
struct panfrost_gem_object * bo ;
unsigned int i ;
int ret ;
2018-09-10 14:27:58 -05:00
job - > bo_count = args - > bo_handle_count ;
if ( ! job - > bo_count )
return 0 ;
2020-01-15 20:15:54 -06:00
ret = drm_gem_objects_lookup ( file_priv ,
( void __user * ) ( uintptr_t ) args - > bo_handles ,
job - > bo_count , & job - > bos ) ;
if ( ret )
return ret ;
job - > mappings = kvmalloc_array ( job - > bo_count ,
sizeof ( struct panfrost_gem_mapping * ) ,
GFP_KERNEL | __GFP_ZERO ) ;
if ( ! job - > mappings )
return - ENOMEM ;
for ( i = 0 ; i < job - > bo_count ; i + + ) {
struct panfrost_gem_mapping * mapping ;
bo = to_panfrost_bo ( job - > bos [ i ] ) ;
mapping = panfrost_gem_mapping_get ( bo , priv ) ;
if ( ! mapping ) {
ret = - EINVAL ;
break ;
}
2019-11-29 14:59:08 +01:00
atomic_inc ( & bo - > gpu_usecount ) ;
2020-01-15 20:15:54 -06:00
job - > mappings [ i ] = mapping ;
}
return ret ;
2018-09-10 14:27:58 -05:00
}
/**
2021-06-22 18:55:01 +02:00
* panfrost_copy_in_sync ( ) - Sets up job - > deps with the sync objects
2018-09-10 14:27:58 -05:00
* referenced by the job .
* @ dev : DRM device
* @ file_priv : DRM file for this fd
* @ args : IOCTL args
* @ job : job being set up
*
* Resolve syncobjs from userspace to fences and attach them to job .
*
* Note that this function doesn ' t need to unreference the fences on
* failure , because that will happen at panfrost_job_cleanup ( ) time .
*/
static int
panfrost_copy_in_sync ( struct drm_device * dev ,
struct drm_file * file_priv ,
struct drm_panfrost_submit * args ,
struct panfrost_job * job )
{
u32 * handles ;
int ret = 0 ;
2021-06-22 18:55:01 +02:00
int i , in_fence_count ;
2018-09-10 14:27:58 -05:00
2021-06-22 18:55:01 +02:00
in_fence_count = args - > in_sync_count ;
2018-09-10 14:27:58 -05:00
2021-06-22 18:55:01 +02:00
if ( ! in_fence_count )
2018-09-10 14:27:58 -05:00
return 0 ;
2021-06-22 18:55:01 +02:00
handles = kvmalloc_array ( in_fence_count , sizeof ( u32 ) , GFP_KERNEL ) ;
2018-09-10 14:27:58 -05:00
if ( ! handles ) {
ret = - ENOMEM ;
DRM_DEBUG ( " Failed to allocate incoming syncobj handles \n " ) ;
goto fail ;
}
if ( copy_from_user ( handles ,
( void __user * ) ( uintptr_t ) args - > in_syncs ,
2021-06-22 18:55:01 +02:00
in_fence_count * sizeof ( u32 ) ) ) {
2018-09-10 14:27:58 -05:00
ret = - EFAULT ;
DRM_DEBUG ( " Failed to copy in syncobj handles \n " ) ;
goto fail ;
}
2021-06-22 18:55:01 +02:00
for ( i = 0 ; i < in_fence_count ; i + + ) {
struct dma_fence * fence ;
2018-09-10 14:27:58 -05:00
ret = drm_syncobj_find_fence ( file_priv , handles [ i ] , 0 , 0 ,
2021-06-22 18:55:01 +02:00
& fence ) ;
if ( ret )
goto fail ;
2021-08-05 12:46:52 +02:00
ret = drm_sched_job_add_dependency ( & job - > base , fence ) ;
2021-06-22 18:55:01 +02:00
if ( ret )
2018-09-10 14:27:58 -05:00
goto fail ;
}
fail :
kvfree ( handles ) ;
return ret ;
}
static int panfrost_ioctl_submit ( struct drm_device * dev , void * data ,
struct drm_file * file )
{
struct panfrost_device * pfdev = dev - > dev_private ;
2022-05-19 16:20:03 +01:00
struct panfrost_file_priv * file_priv = file - > driver_priv ;
2018-09-10 14:27:58 -05:00
struct drm_panfrost_submit * args = data ;
2019-04-24 15:13:53 +02:00
struct drm_syncobj * sync_out = NULL ;
2018-09-10 14:27:58 -05:00
struct panfrost_job * job ;
2021-08-05 12:46:52 +02:00
int ret = 0 , slot ;
2018-09-10 14:27:58 -05:00
2019-04-24 15:13:53 +02:00
if ( ! args - > jc )
return - EINVAL ;
if ( args - > requirements & & args - > requirements ! = PANFROST_JD_REQ_FS )
return - EINVAL ;
if ( args - > out_sync > 0 ) {
sync_out = drm_syncobj_find ( file , args - > out_sync ) ;
if ( ! sync_out )
return - ENODEV ;
}
2018-09-10 14:27:58 -05:00
job = kzalloc ( sizeof ( * job ) , GFP_KERNEL ) ;
2019-04-24 15:13:53 +02:00
if ( ! job ) {
ret = - ENOMEM ;
2021-08-31 15:35:56 +02:00
goto out_put_syncout ;
2019-04-24 15:13:53 +02:00
}
2018-09-10 14:27:58 -05:00
kref_init ( & job - > refcount ) ;
job - > pfdev = pfdev ;
job - > jc = args - > jc ;
job - > requirements = args - > requirements ;
job - > flush_id = panfrost_gpu_get_latest_flush_id ( pfdev ) ;
2022-05-19 16:20:03 +01:00
job - > mmu = file_priv - > mmu ;
2018-09-10 14:27:58 -05:00
2021-08-05 12:46:52 +02:00
slot = panfrost_job_get_slot ( job ) ;
ret = drm_sched_job_init ( & job - > base ,
2022-05-19 16:20:03 +01:00
& file_priv - > sched_entity [ slot ] ,
2021-08-05 12:46:52 +02:00
NULL ) ;
if ( ret )
2021-08-31 15:35:56 +02:00
goto out_put_job ;
2021-08-05 12:46:52 +02:00
2018-09-10 14:27:58 -05:00
ret = panfrost_copy_in_sync ( dev , file , args , job ) ;
if ( ret )
2021-08-31 15:35:56 +02:00
goto out_cleanup_job ;
2018-09-10 14:27:58 -05:00
ret = panfrost_lookup_bos ( dev , file , args , job ) ;
if ( ret )
2021-08-31 15:35:56 +02:00
goto out_cleanup_job ;
2018-09-10 14:27:58 -05:00
ret = panfrost_job_push ( job ) ;
if ( ret )
2021-08-31 15:35:56 +02:00
goto out_cleanup_job ;
2018-09-10 14:27:58 -05:00
/* Update the return sync object for the job */
2019-04-24 15:13:53 +02:00
if ( sync_out )
2018-09-10 14:27:58 -05:00
drm_syncobj_replace_fence ( sync_out , job - > render_done_fence ) ;
2021-08-31 15:35:56 +02:00
out_cleanup_job :
if ( ret )
drm_sched_job_cleanup ( & job - > base ) ;
out_put_job :
2018-09-10 14:27:58 -05:00
panfrost_job_put ( job ) ;
2021-08-31 15:35:56 +02:00
out_put_syncout :
2019-05-09 10:21:51 +02:00
if ( sync_out )
drm_syncobj_put ( sync_out ) ;
2018-09-10 14:27:58 -05:00
return ret ;
}
static int
panfrost_ioctl_wait_bo ( struct drm_device * dev , void * data ,
struct drm_file * file_priv )
{
long ret ;
struct drm_panfrost_wait_bo * args = data ;
struct drm_gem_object * gem_obj ;
unsigned long timeout = drm_timeout_abs_to_jiffies ( args - > timeout_ns ) ;
if ( args - > pad )
return - EINVAL ;
gem_obj = drm_gem_object_lookup ( file_priv , args - > handle ) ;
if ( ! gem_obj )
return - ENOENT ;
2021-11-09 11:08:18 +01:00
ret = dma_resv_wait_timeout ( gem_obj - > resv , DMA_RESV_USAGE_READ ,
true , timeout ) ;
2018-09-10 14:27:58 -05:00
if ( ! ret )
ret = timeout ? - ETIMEDOUT : - EBUSY ;
2020-05-15 10:51:07 +01:00
drm_gem_object_put ( gem_obj ) ;
2018-09-10 14:27:58 -05:00
return ret ;
}
static int panfrost_ioctl_mmap_bo ( struct drm_device * dev , void * data ,
struct drm_file * file_priv )
{
struct drm_panfrost_mmap_bo * args = data ;
2019-08-07 10:52:48 -04:00
struct drm_gem_object * gem_obj ;
int ret ;
2018-09-10 14:27:58 -05:00
if ( args - > flags ! = 0 ) {
DRM_INFO ( " unknown mmap_bo flags: %d \n " , args - > flags ) ;
return - EINVAL ;
}
2019-08-07 10:52:48 -04:00
gem_obj = drm_gem_object_lookup ( file_priv , args - > handle ) ;
if ( ! gem_obj ) {
DRM_DEBUG ( " Failed to look up GEM BO %d \n " , args - > handle ) ;
return - ENOENT ;
}
2019-07-26 16:09:43 -06:00
/* Don't allow mmapping of heap objects as pages are not pinned. */
2019-11-29 14:59:03 +01:00
if ( to_panfrost_bo ( gem_obj ) - > is_heap ) {
ret = - EINVAL ;
goto out ;
}
2019-07-26 16:09:43 -06:00
2019-08-07 10:52:48 -04:00
ret = drm_gem_create_mmap_offset ( gem_obj ) ;
if ( ret = = 0 )
args - > offset = drm_vma_node_offset_addr ( & gem_obj - > vma_node ) ;
2019-11-29 14:59:03 +01:00
out :
2020-05-15 10:51:07 +01:00
drm_gem_object_put ( gem_obj ) ;
2019-08-07 10:52:48 -04:00
return ret ;
2018-09-10 14:27:58 -05:00
}
static int panfrost_ioctl_get_bo_offset ( struct drm_device * dev , void * data ,
struct drm_file * file_priv )
{
2020-01-15 20:15:54 -06:00
struct panfrost_file_priv * priv = file_priv - > driver_priv ;
2018-09-10 14:27:58 -05:00
struct drm_panfrost_get_bo_offset * args = data ;
2020-01-15 20:15:54 -06:00
struct panfrost_gem_mapping * mapping ;
2018-09-10 14:27:58 -05:00
struct drm_gem_object * gem_obj ;
struct panfrost_gem_object * bo ;
gem_obj = drm_gem_object_lookup ( file_priv , args - > handle ) ;
if ( ! gem_obj ) {
DRM_DEBUG ( " Failed to look up GEM BO %d \n " , args - > handle ) ;
return - ENOENT ;
}
bo = to_panfrost_bo ( gem_obj ) ;
2020-01-15 20:15:54 -06:00
mapping = panfrost_gem_mapping_get ( bo , priv ) ;
2020-05-15 10:51:07 +01:00
drm_gem_object_put ( gem_obj ) ;
2020-01-15 20:15:54 -06:00
if ( ! mapping )
return - EINVAL ;
args - > offset = mapping - > mmnode . start < < PAGE_SHIFT ;
panfrost_gem_mapping_put ( mapping ) ;
2018-09-10 14:27:58 -05:00
return 0 ;
}
2019-08-05 08:33:58 -06:00
static int panfrost_ioctl_madvise ( struct drm_device * dev , void * data ,
struct drm_file * file_priv )
{
2020-01-15 20:15:54 -06:00
struct panfrost_file_priv * priv = file_priv - > driver_priv ;
2019-08-05 08:33:58 -06:00
struct drm_panfrost_madvise * args = data ;
struct panfrost_device * pfdev = dev - > dev_private ;
struct drm_gem_object * gem_obj ;
2020-01-15 20:15:54 -06:00
struct panfrost_gem_object * bo ;
int ret = 0 ;
2019-08-05 08:33:58 -06:00
gem_obj = drm_gem_object_lookup ( file_priv , args - > handle ) ;
if ( ! gem_obj ) {
DRM_DEBUG ( " Failed to look up GEM BO %d \n " , args - > handle ) ;
return - ENOENT ;
}
2020-01-15 20:15:54 -06:00
bo = to_panfrost_bo ( gem_obj ) ;
2019-11-29 14:59:02 +01:00
mutex_lock ( & pfdev - > shrinker_lock ) ;
2020-01-15 20:15:54 -06:00
mutex_lock ( & bo - > mappings . lock ) ;
if ( args - > madv = = PANFROST_MADV_DONTNEED ) {
struct panfrost_gem_mapping * first ;
first = list_first_entry ( & bo - > mappings . list ,
struct panfrost_gem_mapping ,
node ) ;
/*
* If we want to mark the BO purgeable , there must be only one
* user : the caller FD .
* We could do something smarter and mark the BO purgeable only
* when all its users have marked it purgeable , but globally
* visible / shared BOs are likely to never be marked purgeable
* anyway , so let ' s not bother .
*/
if ( ! list_is_singular ( & bo - > mappings . list ) | |
2021-06-21 15:38:56 +02:00
WARN_ON_ONCE ( first - > mmu ! = priv - > mmu ) ) {
2020-01-15 20:15:54 -06:00
ret = - EINVAL ;
goto out_unlock_mappings ;
}
}
2021-11-08 10:31:49 +01:00
args - > retained = drm_gem_shmem_madvise ( & bo - > base , args - > madv ) ;
2019-08-05 08:33:58 -06:00
if ( args - > retained ) {
if ( args - > madv = = PANFROST_MADV_DONTNEED )
2022-06-30 23:06:01 +03:00
list_move_tail ( & bo - > base . madv_list ,
& pfdev - > shrinker_list ) ;
2019-08-05 08:33:58 -06:00
else if ( args - > madv = = PANFROST_MADV_WILLNEED )
list_del_init ( & bo - > base . madv_list ) ;
}
2020-01-15 20:15:54 -06:00
out_unlock_mappings :
mutex_unlock ( & bo - > mappings . lock ) ;
2019-11-29 14:59:02 +01:00
mutex_unlock ( & pfdev - > shrinker_lock ) ;
2019-08-05 08:33:58 -06:00
2020-05-15 10:51:07 +01:00
drm_gem_object_put ( gem_obj ) ;
2020-01-15 20:15:54 -06:00
return ret ;
2019-08-05 08:33:58 -06:00
}
2019-06-18 10:16:46 +02:00
/* Return 0 when unstable ioctls are enabled via the module parameter, -ENOSYS otherwise. */
int panfrost_unstable_ioctl_check(void)
{
	if (!unstable_ioctls)
		return -ENOSYS;

	return 0;
}
2018-09-10 14:27:58 -05:00
static int
panfrost_open ( struct drm_device * dev , struct drm_file * file )
{
2019-08-13 09:01:15 -06:00
int ret ;
2018-09-10 14:27:58 -05:00
struct panfrost_device * pfdev = dev - > dev_private ;
struct panfrost_file_priv * panfrost_priv ;
panfrost_priv = kzalloc ( sizeof ( * panfrost_priv ) , GFP_KERNEL ) ;
if ( ! panfrost_priv )
return - ENOMEM ;
panfrost_priv - > pfdev = pfdev ;
file - > driver_priv = panfrost_priv ;
2021-06-21 15:38:56 +02:00
panfrost_priv - > mmu = panfrost_mmu_ctx_create ( pfdev ) ;
if ( IS_ERR ( panfrost_priv - > mmu ) ) {
ret = PTR_ERR ( panfrost_priv - > mmu ) ;
goto err_free ;
}
2019-08-13 09:01:15 -06:00
ret = panfrost_job_open ( panfrost_priv ) ;
if ( ret )
goto err_job ;
return 0 ;
err_job :
2021-06-21 15:38:56 +02:00
panfrost_mmu_ctx_put ( panfrost_priv - > mmu ) ;
err_free :
2019-08-13 09:01:15 -06:00
kfree ( panfrost_priv ) ;
return ret ;
2018-09-10 14:27:58 -05:00
}
static void
panfrost_postclose ( struct drm_device * dev , struct drm_file * file )
{
struct panfrost_file_priv * panfrost_priv = file - > driver_priv ;
2019-11-29 14:59:05 +01:00
panfrost_perfcnt_close ( file ) ;
2018-09-10 14:27:58 -05:00
panfrost_job_close ( panfrost_priv ) ;
2021-06-21 15:38:56 +02:00
panfrost_mmu_ctx_put ( panfrost_priv - > mmu ) ;
2018-09-10 14:27:58 -05:00
kfree ( panfrost_priv ) ;
}
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls [ ] = {
# define PANFROST_IOCTL(n, func, flags) \
DRM_IOCTL_DEF_DRV ( PANFROST_ # # n , panfrost_ioctl_ # # func , flags )
2019-11-01 13:03:12 +00:00
PANFROST_IOCTL ( SUBMIT , submit , DRM_RENDER_ALLOW ) ,
2018-09-10 14:27:58 -05:00
PANFROST_IOCTL ( WAIT_BO , wait_bo , DRM_RENDER_ALLOW ) ,
PANFROST_IOCTL ( CREATE_BO , create_bo , DRM_RENDER_ALLOW ) ,
PANFROST_IOCTL ( MMAP_BO , mmap_bo , DRM_RENDER_ALLOW ) ,
PANFROST_IOCTL ( GET_PARAM , get_param , DRM_RENDER_ALLOW ) ,
PANFROST_IOCTL ( GET_BO_OFFSET , get_bo_offset , DRM_RENDER_ALLOW ) ,
2019-06-18 10:16:48 +02:00
PANFROST_IOCTL ( PERFCNT_ENABLE , perfcnt_enable , DRM_RENDER_ALLOW ) ,
PANFROST_IOCTL ( PERFCNT_DUMP , perfcnt_dump , DRM_RENDER_ALLOW ) ,
2019-08-05 08:33:58 -06:00
PANFROST_IOCTL ( MADVISE , madvise , DRM_RENDER_ALLOW ) ,
2018-09-10 14:27:58 -05:00
} ;
2019-10-16 13:51:57 +02:00
DEFINE_DRM_GEM_FOPS ( panfrost_drm_driver_fops ) ;
2018-09-10 14:27:58 -05:00
2019-07-02 12:49:36 -06:00
/*
* Panfrost driver version :
* - 1.0 - initial interface
* - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
2021-06-04 09:00:11 -04:00
* - 1.2 - adds AFBC_FEATURES query
2019-07-02 12:49:36 -06:00
*/
2020-11-04 11:04:24 +01:00
static const struct drm_driver panfrost_drm_driver = {
2019-06-17 17:39:24 +02:00
. driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ ,
2018-09-10 14:27:58 -05:00
. open = panfrost_open ,
. postclose = panfrost_postclose ,
. ioctls = panfrost_drm_driver_ioctls ,
. num_ioctls = ARRAY_SIZE ( panfrost_drm_driver_ioctls ) ,
. fops = & panfrost_drm_driver_fops ,
. name = " panfrost " ,
. desc = " panfrost DRM " ,
. date = " 20180908 " ,
. major = 1 ,
2021-06-04 09:00:11 -04:00
. minor = 2 ,
2018-09-10 14:27:58 -05:00
. gem_create_object = panfrost_gem_create_object ,
. prime_handle_to_fd = drm_gem_prime_handle_to_fd ,
. prime_fd_to_handle = drm_gem_prime_fd_to_handle ,
. gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table ,
. gem_prime_mmap = drm_gem_prime_mmap ,
} ;
static int panfrost_probe ( struct platform_device * pdev )
{
struct panfrost_device * pfdev ;
struct drm_device * ddev ;
int err ;
pfdev = devm_kzalloc ( & pdev - > dev , sizeof ( * pfdev ) , GFP_KERNEL ) ;
if ( ! pfdev )
return - ENOMEM ;
pfdev - > pdev = pdev ;
pfdev - > dev = & pdev - > dev ;
platform_set_drvdata ( pdev , pfdev ) ;
2020-02-07 13:26:24 +08:00
pfdev - > comp = of_device_get_match_data ( & pdev - > dev ) ;
if ( ! pfdev - > comp )
return - ENODEV ;
2020-09-22 15:16:49 +01:00
pfdev - > coherent = device_get_dma_attr ( & pdev - > dev ) = = DEV_DMA_COHERENT ;
2022-03-02 04:45:35 -08:00
/* Allocate and initialize the DRM device. */
2018-09-10 14:27:58 -05:00
ddev = drm_dev_alloc ( & panfrost_drm_driver , & pdev - > dev ) ;
if ( IS_ERR ( ddev ) )
return PTR_ERR ( ddev ) ;
ddev - > dev_private = pfdev ;
pfdev - > ddev = ddev ;
2019-08-05 08:33:58 -06:00
mutex_init ( & pfdev - > shrinker_lock ) ;
INIT_LIST_HEAD ( & pfdev - > shrinker_list ) ;
2018-09-10 14:27:58 -05:00
err = panfrost_device_init ( pfdev ) ;
if ( err ) {
2019-05-03 16:31:44 +01:00
if ( err ! = - EPROBE_DEFER )
dev_err ( & pdev - > dev , " Fatal error during GPU init \n " ) ;
2018-09-10 14:27:58 -05:00
goto err_out0 ;
}
2019-08-26 17:33:10 -05:00
pm_runtime_set_active ( pfdev - > dev ) ;
pm_runtime_mark_last_busy ( pfdev - > dev ) ;
pm_runtime_enable ( pfdev - > dev ) ;
pm_runtime_set_autosuspend_delay ( pfdev - > dev , 50 ) ; /* ~3 frames */
pm_runtime_use_autosuspend ( pfdev - > dev ) ;
2018-09-10 14:27:58 -05:00
/*
* Register the DRM device with the core and the connectors with
* sysfs
*/
err = drm_dev_register ( ddev , 0 ) ;
if ( err < 0 )
2020-07-10 11:54:03 +02:00
goto err_out1 ;
2018-09-10 14:27:58 -05:00
2019-08-05 08:33:58 -06:00
panfrost_gem_shrinker_init ( ddev ) ;
2018-09-10 14:27:58 -05:00
return 0 ;
err_out1 :
2020-07-10 11:54:03 +02:00
pm_runtime_disable ( pfdev - > dev ) ;
2018-09-10 14:27:58 -05:00
panfrost_device_fini ( pfdev ) ;
2020-10-30 14:58:33 +00:00
pm_runtime_set_suspended ( pfdev - > dev ) ;
2018-09-10 14:27:58 -05:00
err_out0 :
drm_dev_put ( ddev ) ;
return err ;
}
static int panfrost_remove ( struct platform_device * pdev )
{
struct panfrost_device * pfdev = platform_get_drvdata ( pdev ) ;
struct drm_device * ddev = pfdev - > ddev ;
drm_dev_unregister ( ddev ) ;
2019-08-05 08:33:58 -06:00
panfrost_gem_shrinker_cleanup ( ddev ) ;
2019-08-22 21:12:09 -05:00
2018-09-10 14:27:58 -05:00
pm_runtime_get_sync ( pfdev - > dev ) ;
2019-08-22 21:12:09 -05:00
pm_runtime_disable ( pfdev - > dev ) ;
2020-10-30 14:58:33 +00:00
panfrost_device_fini ( pfdev ) ;
pm_runtime_set_suspended ( pfdev - > dev ) ;
2019-08-22 21:12:09 -05:00
2018-09-10 14:27:58 -05:00
drm_dev_put ( ddev ) ;
return 0 ;
}
2022-07-04 16:10:39 +05:30
/*
* The OPP core wants the supply names to be NULL terminated , but we need the
* correct num_supplies value for regulator core . Hence , we NULL terminate here
* and then initialize num_supplies with ARRAY_SIZE - 1.
*/
static const char * const default_supplies [ ] = { " mali " , NULL } ;
2020-02-07 13:26:24 +08:00
static const struct panfrost_compatible default_data = {
2022-07-04 16:10:39 +05:30
. num_supplies = ARRAY_SIZE ( default_supplies ) - 1 ,
2020-02-07 13:26:24 +08:00
. supply_names = default_supplies ,
2020-02-07 13:26:25 +08:00
. num_pm_domains = 1 , /* optional */
. pm_domain_names = NULL ,
2020-02-07 13:26:24 +08:00
} ;
2020-09-16 17:01:47 +02:00
static const struct panfrost_compatible amlogic_data = {
2022-07-04 16:10:39 +05:30
. num_supplies = ARRAY_SIZE ( default_supplies ) - 1 ,
2020-09-16 17:01:47 +02:00
. supply_names = default_supplies ,
. vendor_quirk = panfrost_gpu_amlogic_quirk ,
} ;
2022-07-04 16:10:39 +05:30
static const char * const mediatek_mt8183_supplies [ ] = { " mali " , " sram " , NULL } ;
2021-09-18 17:13:34 +08:00
static const char * const mediatek_mt8183_pm_domains [ ] = { " core0 " , " core1 " , " core2 " } ;
2021-04-21 13:28:55 +08:00
static const struct panfrost_compatible mediatek_mt8183_data = {
2022-07-04 16:10:39 +05:30
. num_supplies = ARRAY_SIZE ( mediatek_mt8183_supplies ) - 1 ,
2021-04-21 13:28:55 +08:00
. supply_names = mediatek_mt8183_supplies ,
. num_pm_domains = ARRAY_SIZE ( mediatek_mt8183_pm_domains ) ,
. pm_domain_names = mediatek_mt8183_pm_domains ,
} ;
2018-09-10 14:27:58 -05:00
static const struct of_device_id dt_match [ ] = {
2020-09-16 17:01:47 +02:00
/* Set first to probe before the generic compatibles */
{ . compatible = " amlogic,meson-gxm-mali " ,
. data = & amlogic_data , } ,
{ . compatible = " amlogic,meson-g12a-mali " ,
. data = & amlogic_data , } ,
2020-02-07 13:26:24 +08:00
{ . compatible = " arm,mali-t604 " , . data = & default_data , } ,
{ . compatible = " arm,mali-t624 " , . data = & default_data , } ,
{ . compatible = " arm,mali-t628 " , . data = & default_data , } ,
{ . compatible = " arm,mali-t720 " , . data = & default_data , } ,
{ . compatible = " arm,mali-t760 " , . data = & default_data , } ,
{ . compatible = " arm,mali-t820 " , . data = & default_data , } ,
{ . compatible = " arm,mali-t830 " , . data = & default_data , } ,
{ . compatible = " arm,mali-t860 " , . data = & default_data , } ,
{ . compatible = " arm,mali-t880 " , . data = & default_data , } ,
2020-06-11 10:58:44 +02:00
{ . compatible = " arm,mali-bifrost " , . data = & default_data , } ,
2022-05-25 10:57:54 -04:00
{ . compatible = " arm,mali-valhall-jm " , . data = & default_data , } ,
2021-04-21 13:28:55 +08:00
{ . compatible = " mediatek,mt8183-mali " , . data = & mediatek_mt8183_data } ,
2018-09-10 14:27:58 -05:00
{ }
} ;
MODULE_DEVICE_TABLE ( of , dt_match ) ;
static struct platform_driver panfrost_driver = {
. probe = panfrost_probe ,
. remove = panfrost_remove ,
. driver = {
. name = " panfrost " ,
2022-11-29 19:19:32 +00:00
. pm = pm_ptr ( & panfrost_pm_ops ) ,
2018-09-10 14:27:58 -05:00
. of_match_table = dt_match ,
} ,
} ;
module_platform_driver ( panfrost_driver ) ;
MODULE_AUTHOR ( " Panfrost Project Developers " ) ;
MODULE_DESCRIPTION ( " Panfrost DRM Driver " ) ;
MODULE_LICENSE ( " GPL v2 " ) ;