// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
*/

#include "msm_ringbuffer.h"
#include "msm_gpu.h"
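
/*
 * Writable at runtime (perm 0600) via
 * /sys/module/msm/parameters/num_hw_submissions:
 */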
static uint num_hw_submissions = 8;
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
module_param(num_hw_submissions, uint, 0600);
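
/*
 * drm_sched run_job callback: initialize the submit's hw fence, drop
 * the BO pins taken at submit time, and write the submit into the
 * ringbuffer.
 */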
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
	struct msm_gem_submit *submit = to_msm_submit(job);
	struct msm_fence_context *fctx = submit->ring->fctx;
	struct msm_gpu *gpu = submit->gpu;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	int i;

	msm_fence_init(submit->hw_fence, fctx);

	submit->seqno = submit->hw_fence->seqno;
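
	/*
	 * Unpin the submit's BOs on the fence-signaling path;
	 * priv->lru.lock protects the pin-count and LRU bookkeeping, so
	 * the per-object locks are not needed here.
	 */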
	mutex_lock(&priv->lru.lock);

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		msm_gem_unpin_active(obj);
		submit->bos[i].flags &= ~BO_PINNED;
	}

	mutex_unlock(&priv->lru.lock);

	msm_gpu_submit(gpu, submit);
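
	/* The scheduler takes ownership of the returned fence reference: */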
	return dma_fence_get(submit->hw_fence);
}
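
/*
 * drm_sched free_job callback: tear down the scheduler job state and
 * drop the submit reference.
 */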
static void msm_job_free(struct drm_sched_job *job)
{
	struct msm_gem_submit *submit = to_msm_submit(job);

	drm_sched_job_cleanup(job);
	msm_gem_submit_put(submit);
}

static const struct drm_sched_backend_ops msm_sched_ops = {
	.run_job = msm_job_run,
	.free_job = msm_job_free
};
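
/*
 * Create a ringbuffer: allocate the (GPU read-only) buffer object,
 * initialize the per-ring scheduler instance, and allocate the ring's
 * fence context.
 */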
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
		void *memptrs, uint64_t memptrs_iova)
{
	struct msm_ringbuffer *ring;
	long sched_timeout;
	char name[32];
	int ret;

	/* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
	BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		ret = -ENOMEM;
		goto fail;
	}

	ring->gpu = gpu;
	ring->id = id;

	ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
		check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
		gpu->aspace, &ring->bo, &ring->iova);

	if (IS_ERR(ring->start)) {
		ret = PTR_ERR(ring->start);
		ring->start = NULL;
		goto fail;
	}

	msm_gem_object_set_name(ring->bo, "ring%d", id);
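
	/* The ring size is in bytes, but start/end/cur/next are uint32_t
	 * pointers, hence the >> 2:
	 */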
	ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
	ring->next  = ring->start;
	ring->cur   = ring->start;

	ring->memptrs = memptrs;
	ring->memptrs_iova = memptrs_iova;

	/* currently managing hangcheck ourselves: */
	sched_timeout = MAX_SCHEDULE_TIMEOUT;
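
	/*
	 * Note: num_hw_submissions bounds how many jobs the scheduler
	 * will write into this ringbuffer at a time.
	 */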
	ret = drm_sched_init(&ring->sched, &msm_sched_ops,
			     DRM_SCHED_PRIORITY_COUNT,
			     num_hw_submissions, 0, sched_timeout,
			     NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
	if (ret) {
		goto fail;
	}

	INIT_LIST_HEAD(&ring->submits);
	spin_lock_init(&ring->submit_lock);
	spin_lock_init(&ring->preempt_lock);

	snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);

	ring->fctx = msm_fence_context_alloc(gpu->dev, &ring->memptrs->fence, name);

	return ring;

fail:
	msm_ringbuffer_destroy(ring);
	return ERR_PTR(ret);
}
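
/*
 * Also used to unwind a partially-constructed ring from the fail: path
 * above, so NULL/ERR ring pointers are tolerated.
 */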
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
	if (IS_ERR_OR_NULL(ring))
		return;

	drm_sched_fini(&ring->sched);

	msm_fence_context_free(ring->fctx);

	msm_gem_kernel_put(ring->bo, ring->gpu->aspace);

	kfree(ring);
}