2019-06-03 07:44:50 +02:00
// SPDX-License-Identifier: GPL-2.0-only
2015-12-03 18:21:29 +01:00
/*
* Copyright (C) 2015 Etnaviv Project
*/
2019-06-30 07:21:03 +02:00
# include <drm/drm_file.h>
2017-03-02 16:05:45 +01:00
# include <linux/dma-fence-array.h>
2019-06-30 07:21:03 +02:00
# include <linux/file.h>
2019-07-04 12:43:37 +02:00
# include <linux/pm_runtime.h>
2019-08-11 10:06:32 +02:00
# include <linux/dma-resv.h>
2017-03-02 16:05:45 +01:00
# include <linux/sync_file.h>
2019-06-30 07:21:03 +02:00
# include <linux/uaccess.h>
# include <linux/vmalloc.h>
2017-01-16 16:09:51 +01:00
# include "etnaviv_cmdbuf.h"
2015-12-03 18:21:29 +01:00
# include "etnaviv_drv.h"
# include "etnaviv_gpu.h"
# include "etnaviv_gem.h"
2017-09-24 15:15:26 +02:00
# include "etnaviv_perfmon.h"
2017-12-04 18:41:58 +01:00
# include "etnaviv_sched.h"
2015-12-03 18:21:29 +01:00
/*
* Cmdstream submission:
*/
# define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
# define BO_LOCKED 0x4000
# define BO_PINNED 0x2000
/*
 * Allocate a submit object sized for nr_bos trailing BO entries plus a
 * separate array of nr_pmrs performance monitor requests.  Returns NULL
 * on allocation failure.  The submit starts with one reference; the
 * caller drops it with etnaviv_submit_put().
 */
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
		struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
	struct etnaviv_gem_submit *submit;
	size_t size;

	/* bos[] is a variable-length array at the end of the submit */
	size = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
	submit = kzalloc(size, GFP_KERNEL);
	if (!submit)
		return NULL;

	submit->pmrs = kcalloc(nr_pmrs, sizeof(*submit->pmrs), GFP_KERNEL);
	if (!submit->pmrs)
		goto err_free_submit;

	submit->nr_pmrs = nr_pmrs;
	submit->gpu = gpu;
	kref_init(&submit->refcount);

	return submit;

err_free_submit:
	kfree(submit);
	return NULL;
}
/*
 * Resolve the userspace BO handle array into GEM object pointers.
 *
 * Validates the per-BO flags (and, for softpin submits, the presumed
 * address) and takes a reference on every looked-up object.  On any
 * error, submit->nr_bos is still set to the number of entries filled
 * in so far, so the cleanup path drops exactly the references taken
 * here.  Returns 0 or -EINVAL.
 */
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;
		if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
			/* softpin addresses must lie above the reserved
			 * low part of the GPU virtual address space */
			if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
				DRM_ERROR("invalid softpin address\n");
				ret = -EINVAL;
				goto out_unlock;
			}
			submit->bos[i].va = bo->presumed;
		}

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
		 */
		drm_gem_object_get(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}
static void submit_unlock_object ( struct etnaviv_gem_submit * submit , int i )
{
if ( submit - > bos [ i ] . flags & BO_LOCKED ) {
2019-02-02 09:41:55 -06:00
struct drm_gem_object * obj = & submit - > bos [ i ] . obj - > base ;
2015-12-03 18:21:29 +01:00
2019-12-14 01:09:27 +01:00
dma_resv_unlock ( obj - > resv ) ;
2015-12-03 18:21:29 +01:00
submit - > bos [ i ] . flags & = ~ BO_LOCKED ;
}
}
/*
 * Take the dma-resv lock of every BO in the submit, using the ww_mutex
 * acquire context to resolve lock ordering against concurrent submits.
 * On -EDEADLK all locks taken so far are dropped, the contended lock is
 * acquired in the slow path, and the whole loop is retried.  On success
 * every BO is flagged BO_LOCKED and the acquire context is marked done;
 * on any other failure all locks taken so far are released again.
 */
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
		struct ww_acquire_ctx *ticket)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		/* reached the BO locked in the slow path; it no longer
		 * needs special handling on the next backoff */
		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(obj->resv, ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct drm_gem_object *obj;

		obj = &submit->bos[contended].obj->base;

		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
/*
 * Prepare each BO's reservation object for this submit and, unless
 * ETNA_SUBMIT_NO_IMPLICIT is set, collect the fences the job must wait
 * on: written BOs gather all current fences, read-only BOs only the
 * exclusive fence.  Called with all BO reservations locked by
 * submit_lock_objects().
 */
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		struct dma_resv *robj = bo->obj->base.resv;

		/* read-only BOs later get a shared fence attached in
		 * submit_attach_object_fences(); reserve the slot now
		 * while failure is still recoverable */
		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
			ret = dma_resv_reserve_shared(robj, 1);
			if (ret)
				return ret;
		}

		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
			continue;

		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
			ret = dma_resv_get_fences(robj, &bo->excl,
						  &bo->nr_shared,
						  &bo->shared);
			if (ret)
				return ret;
		} else {
			bo->excl = dma_resv_get_excl_unlocked(robj);
		}

	}

	return ret;
}
2017-11-23 17:49:59 +01:00
static void submit_attach_object_fences ( struct etnaviv_gem_submit * submit )
{
int i ;
for ( i = 0 ; i < submit - > nr_bos ; i + + ) {
2019-02-02 09:41:55 -06:00
struct drm_gem_object * obj = & submit - > bos [ i ] . obj - > base ;
2017-11-23 17:49:59 +01:00
if ( submit - > bos [ i ] . flags & ETNA_SUBMIT_BO_WRITE )
2019-08-11 10:06:32 +02:00
dma_resv_add_excl_fence ( obj - > resv ,
2017-11-23 17:57:12 +01:00
submit - > out_fence ) ;
2017-11-23 17:49:59 +01:00
else
2019-08-11 10:06:32 +02:00
dma_resv_add_shared_fence ( obj - > resv ,
2017-11-23 17:57:12 +01:00
submit - > out_fence ) ;
2017-11-23 17:49:59 +01:00
submit_unlock_object ( submit , i ) ;
}
}
/*
 * Map every BO into the submit's MMU context and take a reference on
 * each mapping.  For softpin submits the resulting IOVA must match the
 * address userspace supplied.  Successfully pinned BOs are flagged
 * BO_PINNED so submit_cleanup() can unwind them on any error exit —
 * including the direct -EINVAL return on a softpin mismatch, which
 * drops only the current (not yet flagged) mapping itself.
 */
static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
						  submit->mmu_context,
						  submit->bos[i].va);
		if (IS_ERR(mapping)) {
			ret = PTR_ERR(mapping);
			break;
		}

		/* a softpin BO mapped at any other address is unusable */
		if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
		     submit->bos[i].va != mapping->iova) {
			etnaviv_gem_mapping_unreference(mapping);
			return -EINVAL;
		}

		atomic_inc(&etnaviv_obj->gpu_active);

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].mapping = mapping;
	}

	return ret;
}
/*
 * Bounds-checked lookup of BO entry idx in this submit.  On success,
 * *bo points at the entry and 0 is returned; otherwise -EINVAL.
 */
static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
	struct etnaviv_gem_submit_bo **bo)
{
	if (idx < submit->nr_bos) {
		*bo = &submit->bos[idx];
		return 0;
	}

	DRM_ERROR("invalid buffer index: %u (out of %u)\n",
			idx, submit->nr_bos);
	return -EINVAL;
}
/* process the reloc's and patch up the cmdstream as needed: */
/*
 * For each relocation, writes the BO's GPU address (mapping IOVA plus
 * reloc_offset) into the stream word at submit_offset.  "size" is the
 * stream length in dwords (the caller passes stream_size / 4).  Relocs
 * must have ascending, 4-byte-aligned submit offsets that stay inside
 * the stream, and the target offset must lie inside the BO.
 */
static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
		u32 nr_relocs)
{
	u32 i, last_offset = 0;
	u32 *ptr = stream;
	int ret;

	/* Submits using softpin don't blend with relocs */
	if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
		return -EINVAL;

	for (i = 0; i < nr_relocs; i++) {
		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
		struct etnaviv_gem_submit_bo *bo;
		u32 off;

		if (unlikely(r->flags)) {
			DRM_ERROR("invalid reloc flags\n");
			return -EINVAL;
		}

		if (r->submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  r->submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = r->submit_offset / 4;

		/* relocs must be sorted by ascending offset and stay
		 * inside the stream */
		if ((off >= size) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, r->reloc_idx, &bo);
		if (ret)
			return ret;

		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

		/* patch the stream word with the BO's GPU address */
		ptr[off] = bo->mapping->iova + r->reloc_offset;

		last_offset = off;
	}

	return 0;
}
2017-09-24 15:15:26 +02:00
static int submit_perfmon_validate ( struct etnaviv_gem_submit * submit ,
2017-11-24 12:02:38 +01:00
u32 exec_state , const struct drm_etnaviv_gem_submit_pmr * pmrs )
2017-09-24 15:15:26 +02:00
{
u32 i ;
2017-11-24 12:02:38 +01:00
for ( i = 0 ; i < submit - > nr_pmrs ; i + + ) {
2017-09-24 15:15:26 +02:00
const struct drm_etnaviv_gem_submit_pmr * r = pmrs + i ;
struct etnaviv_gem_submit_bo * bo ;
int ret ;
ret = submit_bo ( submit , r - > read_idx , & bo ) ;
if ( ret )
return ret ;
/* at offset 0 a sequence number gets stored used for userspace sync */
if ( r - > read_offset = = 0 ) {
DRM_ERROR ( " perfmon request: offset is 0 " ) ;
return - EINVAL ;
}
if ( r - > read_offset > = bo - > obj - > base . size - sizeof ( u32 ) ) {
DRM_ERROR ( " perfmon request: offset %u outside object " , i ) ;
return - EINVAL ;
}
if ( r - > flags & ~ ( ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST ) ) {
DRM_ERROR ( " perfmon request: flags are not valid " ) ;
return - EINVAL ;
}
2017-11-24 12:02:38 +01:00
if ( etnaviv_pm_req_validate ( r , exec_state ) ) {
2017-09-24 15:15:26 +02:00
DRM_ERROR ( " perfmon request: domain or signal not valid " ) ;
return - EINVAL ;
}
2017-11-24 12:02:38 +01:00
submit - > pmrs [ i ] . flags = r - > flags ;
submit - > pmrs [ i ] . domain = r - > domain ;
submit - > pmrs [ i ] . signal = r - > signal ;
submit - > pmrs [ i ] . sequence = r - > sequence ;
submit - > pmrs [ i ] . offset = r - > read_offset ;
submit - > pmrs [ i ] . bo_vma = etnaviv_gem_vmap ( & bo - > obj - > base ) ;
2017-09-24 15:15:26 +02:00
}
return 0 ;
}
/*
 * kref release callback: tears down everything the submit owns — the
 * runtime-PM reference, the command buffer suballocation, the MMU
 * context references, per-BO pins/locks/object references, the in/out
 * fences and the out-fence IDR entry — then frees the pmr array and
 * the submit itself.  Safe to call from any error path, since each
 * step checks whether the corresponding resource was acquired.
 */
static void submit_cleanup(struct kref *kref)
{
	struct etnaviv_gem_submit *submit =
			container_of(kref, struct etnaviv_gem_submit, refcount);
	unsigned i;

	if (submit->runtime_resumed)
		pm_runtime_put_autosuspend(submit->gpu->dev);

	if (submit->cmdbuf.suballoc)
		etnaviv_cmdbuf_free(&submit->cmdbuf);

	if (submit->mmu_context)
		etnaviv_iommu_context_put(submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* unpin all objects */
		if (submit->bos[i].flags & BO_PINNED) {
			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
			atomic_dec(&etnaviv_obj->gpu_active);
			submit->bos[i].mapping = NULL;
			submit->bos[i].flags &= ~BO_PINNED;
		}

		/* if the GPU submit failed, objects might still be locked */
		submit_unlock_object(submit, i);
		/* drop the reference taken in submit_lookup_objects() */
		drm_gem_object_put(&etnaviv_obj->base);
	}

	wake_up_all(&submit->gpu->fence_event);

	if (submit->in_fence)
		dma_fence_put(submit->in_fence);
	if (submit->out_fence) {
		/* first remove from IDR, so fence can not be found anymore */
		mutex_lock(&submit->gpu->fence_lock);
		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
		mutex_unlock(&submit->gpu->fence_lock);
		dma_fence_put(submit->out_fence);
	}
	kfree(submit->pmrs);
	kfree(submit);
}
/* Drop a reference on the submit; the last put frees it via submit_cleanup(). */
void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}
/*
 * GEM_SUBMIT ioctl: validate the arguments, copy the BO/reloc/perfmon
 * arrays and the command stream from userspace, build a submit object,
 * and hand it to the GPU scheduler.  Once submit_create() has
 * succeeded, every exit — success or error — goes through
 * etnaviv_submit_put(), which performs the full teardown in
 * submit_cleanup().
 */
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_file_private *ctx = file->driver_priv;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	/* softpin submits are only accepted on MMU version 2 */
	if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
	    priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
		DRM_ERROR("softpin requested on incompatible MMU\n");
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	/* reserve the out-fence fd before building the submit */
	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_objects;

	submit->ctx = file->driver_priv;
	/* the submit holds its own reference on the per-process MMU context */
	etnaviv_iommu_context_get(submit->ctx->mmu);
	submit->mmu_context = submit->ctx->mmu;
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_objects;

	/* software cmdstream validation is skipped on MMU version 2 */
	if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
	    !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_objects;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		submit->in_fence = sync_file_get_fence(args->fence_fd);
		if (!submit->in_fence) {
			ret = -EINVAL;
			goto err_submit_objects;
		}
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_objects;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_objects;

	/* copy the (now relocated) stream into the GPU-visible cmdbuf */
	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_objects;

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_objects;

	/* hands the job to the scheduler; populates submit->out_fence */
	ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
	if (ret)
		goto err_submit_objects;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto err_submit_objects;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence_id;

err_submit_objects:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	/* kvfree() tolerates the NULL pointers from failed allocations */
	kvfree(stream);
	kvfree(bos);
	kvfree(relocs);
	kvfree(pmrs);

	return ret;
}