2019-06-03 07:44:50 +02:00
// SPDX-License-Identifier: GPL-2.0-only
2013-07-19 12:59:32 -04:00
/*
* Copyright ( C ) 2013 Red Hat
* Author : Rob Clark < robdclark @ gmail . com >
*/
2019-08-04 08:55:51 +02:00
# include <linux/file.h>
2016-06-16 16:08:19 -04:00
# include <linux/sync_file.h>
2019-08-04 08:55:51 +02:00
# include <linux/uaccess.h>
2020-01-24 00:57:10 +01:00
# include <drm/drm_drv.h>
2019-08-04 08:55:51 +02:00
# include <drm/drm_file.h>
2020-01-24 00:57:10 +01:00
# include <drm/drm_syncobj.h>
2016-06-16 16:08:19 -04:00
2013-07-19 12:59:32 -04:00
# include "msm_drv.h"
# include "msm_gpu.h"
# include "msm_gem.h"
2018-11-02 09:25:21 -06:00
# include "msm_gpu_trace.h"
2013-07-19 12:59:32 -04:00
/*
* Cmdstream submission :
*/
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID	0x8000	/* is current addr in cmdstream correct/valid? */
#define BO_LOCKED	0x4000	/* obj lock is held */
#define BO_ACTIVE	0x2000	/* active refcnt is held */
#define BO_PINNED	0x1000	/* obj is pinned and on active list */
2013-07-19 12:59:32 -04:00
/*
 * Allocate a submit object with trailing storage for nr_bos bo entries
 * and nr_cmds cmd entries, and initialize its scheduler job on the
 * submitqueue's entity.
 *
 * Returns the new submit or an ERR_PTR.  The submit is freed via
 * __msm_gem_submit_destroy() when its refcount drops to zero.
 */
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz;
	int ret;

	/* compute the size in 64-bit so overflow of size_t can be detected
	 * (relevant on 32-bit, where SIZE_MAX < U64_MAX):
	 */
	sz = struct_size(submit, bos, nr_bos) +
			((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return ERR_PTR(-ENOMEM);

	/* nr_bos/nr_cmds are user-controlled, so don't warn on alloc failure: */
	submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return ERR_PTR(-ENOMEM);

	ret = drm_sched_job_init(&submit->base, queue->entity, queue);
	if (ret) {
		kfree(submit);
		return ERR_PTR(ret);
	}

	kref_init(&submit->ref);
	submit->dev = dev;
	submit->aspace = queue->ctx->aspace;
	submit->gpu = gpu;
	/* the cmd table lives immediately after the bos[] flex array: */
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->ring = gpu->rb[queue->ring_nr];
	submit->fault_dumped = false;

	INIT_LIST_HEAD(&submit->node);

	return submit;
}
2020-10-23 09:51:17 -07:00
/*
 * Final kref release for a submit: remove its fence id from the queue's
 * idr, drop the fence/pid/queue references, and free the per-cmd reloc
 * tables along with the submit itself.
 */
void __msm_gem_submit_destroy(struct kref *kref)
{
	struct msm_gem_submit *submit =
			container_of(kref, struct msm_gem_submit, ref);
	unsigned i;

	/* fence_id of zero means no id was ever allocated: */
	if (submit->fence_id) {
		mutex_lock(&submit->queue->lock);
		idr_remove(&submit->queue->fence_idr, submit->fence_id);
		mutex_unlock(&submit->queue->lock);
	}

	dma_fence_put(submit->user_fence);
	dma_fence_put(submit->hw_fence);

	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	for (i = 0; i < submit->nr_cmds; i++)
		kfree(submit->cmd[i].relocs);

	kfree(submit);
}
2013-07-19 12:59:32 -04:00
/*
 * Copy the bo table from userspace, validate per-bo flags, and translate
 * GEM handles into object pointers (taking a reference on each object).
 * On error, submit->nr_bos is trimmed to the number of slots actually
 * holding an object reference, so later cleanup only touches those.
 */
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;	/* no object references taken yet */
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* in validate_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = to_msm_bo(obj);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}
2020-10-23 09:51:08 -07:00
static int submit_lookup_cmds ( struct msm_gem_submit * submit ,
struct drm_msm_gem_submit * args , struct drm_file * file )
{
2021-09-27 13:36:23 +02:00
unsigned i ;
size_t sz ;
2020-10-23 09:51:08 -07:00
int ret = 0 ;
for ( i = 0 ; i < args - > nr_cmds ; i + + ) {
struct drm_msm_gem_submit_cmd submit_cmd ;
void __user * userptr =
u64_to_user_ptr ( args - > cmds + ( i * sizeof ( submit_cmd ) ) ) ;
ret = copy_from_user ( & submit_cmd , userptr , sizeof ( submit_cmd ) ) ;
if ( ret ) {
ret = - EFAULT ;
goto out ;
}
/* validate input from userspace: */
switch ( submit_cmd . type ) {
case MSM_SUBMIT_CMD_BUF :
case MSM_SUBMIT_CMD_IB_TARGET_BUF :
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF :
break ;
default :
DRM_ERROR ( " invalid type: %08x \n " , submit_cmd . type ) ;
return - EINVAL ;
}
if ( submit_cmd . size % 4 ) {
DRM_ERROR ( " non-aligned cmdstream buffer size: %u \n " ,
submit_cmd . size ) ;
ret = - EINVAL ;
goto out ;
}
submit - > cmd [ i ] . type = submit_cmd . type ;
submit - > cmd [ i ] . size = submit_cmd . size / 4 ;
submit - > cmd [ i ] . offset = submit_cmd . submit_offset / 4 ;
submit - > cmd [ i ] . idx = submit_cmd . submit_idx ;
submit - > cmd [ i ] . nr_relocs = submit_cmd . nr_relocs ;
2021-02-04 14:53:11 -08:00
userptr = u64_to_user_ptr ( submit_cmd . relocs ) ;
2020-10-23 09:51:08 -07:00
sz = array_size ( submit_cmd . nr_relocs ,
sizeof ( struct drm_msm_gem_submit_reloc ) ) ;
/* check for overflow: */
if ( sz = = SIZE_MAX ) {
ret = - ENOMEM ;
goto out ;
}
submit - > cmd [ i ] . relocs = kmalloc ( sz , GFP_KERNEL ) ;
ret = copy_from_user ( submit - > cmd [ i ] . relocs , userptr , sz ) ;
if ( ret ) {
ret = - EFAULT ;
goto out ;
}
}
out :
return ret ;
}
2021-07-27 18:06:11 -07:00
/* Unwind bo state, according to cleanup_flags.  In the success case, only
 * the lock is dropped at the end of the submit (and active/pin ref is dropped
 * later when the submit is retired).
 */
static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
		unsigned cleanup_flags)
{
	struct drm_gem_object *obj = &submit->bos[i].obj->base;
	/* only unwind state this bo actually holds: */
	unsigned flags = submit->bos[i].flags & cleanup_flags;

	if (flags & BO_PINNED)
		msm_gem_unpin_iova_locked(obj, submit->aspace);

	if (flags & BO_ACTIVE)
		msm_gem_active_put(obj);

	/* drop the lock last; the _locked unpin above requires it held: */
	if (flags & BO_LOCKED)
		dma_resv_unlock(obj->resv);

	submit->bos[i].flags &= ~cleanup_flags;
}
2013-07-19 12:59:32 -04:00
2021-07-27 18:06:11 -07:00
/* Fully unwind a bo (drop pin, active refcnt and lock), and clear its
 * cached iova unless the userspace-presumed address was valid.
 */
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;
}
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		/* the contended bo was already locked via the slow path: */
		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(msm_obj->base.resv,
							  &submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	if (ret == -EALREADY) {
		/* -EALREADY means the same bo appears twice in the bo list: */
		DRM_ERROR("handle %u at index %u already on submit list\n",
				submit->bos[i].handle, i);
		ret = -EINVAL;
	}

	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
						       &submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}

		/* Not expecting -EALREADY here, if the bo was already
		 * locked, we should have gotten -EALREADY already from
		 * the dma_resv_lock_interruptable() call.
		 */
		WARN_ON_ONCE(ret == -EALREADY);
	}

	return ret;
}
2017-09-12 14:23:05 -04:00
/*
 * Reserve dma_resv fence slots and add implicit-sync dependencies for
 * each bo.  When no_implicit is set, only write (exclusive) fences are
 * still synced against, since exclusive fences must be ordered.
 */
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		if (!write) {
			/* NOTE: _reserve_shared() must happen before
			 * _add_shared_fence(), which makes this a slightly
			 * strange place to call it.  OTOH this is a
			 * convenient can-fail point to hook it in.
			 */
			ret = dma_resv_reserve_shared(obj->resv, 1);
			if (ret)
				return ret;
		}

		/* exclusive fences must be ordered */
		if (no_implicit && !write)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
							      obj,
							      write);
		if (ret)
			break;
	}

	return ret;
}
2016-03-14 13:56:37 -04:00
/*
 * Pin every bo and record its iova.  submit->valid is cleared if any
 * bo's iova differs from the address userspace presumed, which means
 * the cmdstream still needs reloc patching.
 */
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	/*
	 * Increment active_count first, so if under memory pressure, we
	 * don't inadvertently evict a bo needed by the submit in order
	 * to pin an earlier bo in the same submit.
	 */
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_active_get(obj, submit->gpu);
		submit->bos[i].flags |= BO_ACTIVE;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		uint64_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_and_pin_iova_locked(obj,
				submit->aspace, &iova);

		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}
2021-07-27 18:06:11 -07:00
/* Attach the submit's completion fence to each bo's reservation object,
 * exclusive for writers and shared for readers, per the bo's flags.
 */
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(obj->resv, submit->user_fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_shared_fence(obj->resv, submit->user_fence);
	}
}
2013-07-19 12:59:32 -04:00
/* Look up bo state by index, with bounds checking.  Each out-param
 * (obj/iova/valid) may be NULL when the caller doesn't need it.
 */
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}
/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		/* relocs must stay in bounds and be sorted by offset: */
		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		/* presumed address was right, nothing to patch: */
		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(&obj->base);

	return ret;
}
2021-07-27 18:06:11 -07:00
/* Cleanup submit at end of ioctl.  In the error case, this also drops
 * references, unpins, and drops active refcnt.  In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
	unsigned cleanup_flags = BO_LOCKED;
	unsigned i;

	if (error)
		cleanup_flags |= BO_PINNED | BO_ACTIVE;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_cleanup_bo(submit, i, cleanup_flags);
		/* on error, also drop the lookup reference: */
		if (error)
			drm_gem_object_put(&msm_obj->base);
	}
}
2021-07-27 18:06:11 -07:00
/* Called when the submit is retired: drop each bo's pin and active
 * refcnt (under the obj lock) and release the object reference.
 */
void msm_submit_retire(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_lock(obj);
		submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE);
		msm_gem_unlock(obj);
		drm_gem_object_put(obj);
	}
}
2020-01-24 00:57:10 +01:00
/* A syncobj (optionally a timeline point on it) to be signalled with
 * the submit's completion fence once the job has been queued.
 */
struct msm_submit_post_dep {
	struct drm_syncobj *syncobj;
	uint64_t point;			/* timeline point; 0 for binary syncobjs */
	struct dma_fence_chain *chain;	/* preallocated chain node for timeline points */
};
2021-07-27 18:06:14 -07:00
/*
 * Import the MSM_SUBMIT_SYNCOBJ_IN syncobjs: each syncobj's fence is
 * added as a scheduler dependency of the submit.  Returns the array of
 * syncobjs flagged MSM_SUBMIT_SYNCOBJ_RESET (so the caller can reset
 * them after queuing the job), or an ERR_PTR on failure.
 */
static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
                                           struct drm_file *file,
                                           uint64_t in_syncobjs_addr,
                                           uint32_t nr_in_syncobjs,
                                           size_t syncobj_stride,
                                           struct msm_ringbuffer *ring)
{
	struct drm_syncobj **syncobjs = NULL;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
	                   GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!syncobjs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_in_syncobjs; ++i) {
		uint64_t address = in_syncobjs_addr + i * syncobj_stride;
		struct dma_fence *fence;

		/* the stride may exceed our struct (future extensions);
		 * only copy the part we understand:
		 */
		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		if (syncobj_desc.point &&
		    !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
			ret = -EINVAL;
			break;
		}

		ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
		                             syncobj_desc.point, 0, &fence);
		if (ret)
			break;

		ret = drm_sched_job_add_dependency(&submit->base, fence);
		if (ret)
			break;

		if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
			syncobjs[i] =
				drm_syncobj_find(file, syncobj_desc.handle);
			if (!syncobjs[i]) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret) {
		/* array was zero-initialized, so unset slots are NULL: */
		for (j = 0; j <= i; ++j) {
			if (syncobjs[j])
				drm_syncobj_put(syncobjs[j]);
		}
		kfree(syncobjs);
		return ERR_PTR(ret);
	}

	return syncobjs;
}
/* Reset (detach the fence from) every syncobj that was collected by
 * msm_parse_deps() with the RESET flag.
 */
static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
                               uint32_t nr_syncobjs)
{
	uint32_t idx;

	if (!syncobjs)
		return;

	for (idx = 0; idx < nr_syncobjs; ++idx) {
		if (syncobjs[idx])
			drm_syncobj_replace_fence(syncobjs[idx], NULL);
	}
}
/*
 * Collect the MSM_SUBMIT_SYNCOBJ_OUT syncobjs which should be signalled
 * (directly, or via a timeline point) with the submit's completion
 * fence.  Returns the post-dep array, or an ERR_PTR on failure.
 *
 * Fix: allocate with kcalloc() instead of kmalloc_array().  The error
 * path iterates 0..i inclusive, so when we bail out before a slot's
 * syncobj/chain pointers are assigned (e.g. copy_from_user() or flags
 * validation failure), it would otherwise free/put uninitialized
 * pointers.
 */
static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
                                                       struct drm_file *file,
                                                       uint64_t syncobjs_addr,
                                                       uint32_t nr_syncobjs,
                                                       size_t syncobj_stride)
{
	struct msm_submit_post_dep *post_deps;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
	                    GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!post_deps)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_syncobjs; ++i) {
		uint64_t address = syncobjs_addr + i * syncobj_stride;

		/* only copy the part of the descriptor we understand: */
		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		post_deps[i].point = syncobj_desc.point;

		if (syncobj_desc.flags) {
			ret = -EINVAL;
			break;
		}

		if (syncobj_desc.point) {
			if (!drm_core_check_feature(dev,
			                            DRIVER_SYNCOBJ_TIMELINE)) {
				ret = -EOPNOTSUPP;
				break;
			}

			post_deps[i].chain = dma_fence_chain_alloc();
			if (!post_deps[i].chain) {
				ret = -ENOMEM;
				break;
			}
		}

		post_deps[i].syncobj =
			drm_syncobj_find(file, syncobj_desc.handle);
		if (!post_deps[i].syncobj) {
			ret = -EINVAL;
			break;
		}
	}

	if (ret) {
		/* entries past the failure point are zeroed, so this is safe
		 * even for the partially-initialized slot i:
		 */
		for (j = 0; j <= i; ++j) {
			dma_fence_chain_free(post_deps[j].chain);
			if (post_deps[j].syncobj)
				drm_syncobj_put(post_deps[j].syncobj);
		}

		kfree(post_deps);
		return ERR_PTR(ret);
	}

	return post_deps;
}
/* Signal each post-dep syncobj with the submit's completion fence,
 * adding a timeline point where a chain node was preallocated.
 */
static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
                                  uint32_t count, struct dma_fence *fence)
{
	uint32_t idx;

	if (!post_deps)
		return;

	for (idx = 0; idx < count; ++idx) {
		struct msm_submit_post_dep *dep = &post_deps[idx];

		if (!dep->chain) {
			drm_syncobj_replace_fence(dep->syncobj, fence);
			continue;
		}

		/* the chain node's ownership moves to the syncobj: */
		drm_syncobj_add_point(dep->syncobj, dep->chain,
				      fence, dep->point);
		dep->chain = NULL;
	}
}
2013-07-19 12:59:32 -04:00
int msm_ioctl_gem_submit ( struct drm_device * dev , void * data ,
struct drm_file * file )
{
2018-11-02 09:25:21 -06:00
static atomic_t ident = ATOMIC_INIT ( 0 ) ;
2013-07-19 12:59:32 -04:00
struct msm_drm_private * priv = dev - > dev_private ;
struct drm_msm_gem_submit * args = data ;
struct msm_file_private * ctx = file - > driver_priv ;
2021-07-27 18:06:16 -07:00
struct msm_gem_submit * submit = NULL ;
2016-02-03 13:12:31 -05:00
struct msm_gpu * gpu = priv - > gpu ;
2017-10-20 11:06:55 -06:00
struct msm_gpu_submitqueue * queue ;
2017-10-20 11:06:57 -06:00
struct msm_ringbuffer * ring ;
2020-01-24 00:57:10 +01:00
struct msm_submit_post_dep * post_deps = NULL ;
struct drm_syncobj * * syncobjs_to_reset = NULL ;
2016-06-16 16:43:49 -04:00
int out_fence_fd = - 1 ;
2018-11-02 09:25:21 -06:00
struct pid * pid = get_pid ( task_pid ( current ) ) ;
2019-11-20 11:56:07 +01:00
bool has_ww_ticket = false ;
2013-07-19 12:59:32 -04:00
unsigned i ;
2018-11-02 09:25:21 -06:00
int ret , submitid ;
2021-07-27 18:06:06 -07:00
2016-02-03 13:12:31 -05:00
if ( ! gpu )
return - ENXIO ;
2020-01-24 00:57:10 +01:00
if ( args - > pad )
return - EINVAL ;
2013-07-19 12:59:32 -04:00
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module :
*/
2016-04-23 10:08:59 -04:00
if ( MSM_PIPE_ID ( args - > flags ) ! = MSM_PIPE_3D0 )
return - EINVAL ;
if ( MSM_PIPE_FLAGS ( args - > flags ) & ~ MSM_SUBMIT_FLAGS )
2013-07-19 12:59:32 -04:00
return - EINVAL ;
2017-12-13 15:12:57 -05:00
if ( args - > flags & MSM_SUBMIT_SUDO ) {
if ( ! IS_ENABLED ( CONFIG_DRM_MSM_GPU_SUDO ) | |
! capable ( CAP_SYS_RAWIO ) )
return - EINVAL ;
}
2017-10-20 11:06:55 -06:00
queue = msm_submitqueue_get ( ctx , args - > queueid ) ;
if ( ! queue )
return - ENOENT ;
2018-11-02 09:25:21 -06:00
/* Get a unique identifier for the submission for logging purposes */
submitid = atomic_inc_return ( & ident ) - 1 ;
2021-07-27 18:06:17 -07:00
ring = gpu - > rb [ queue - > ring_nr ] ;
2018-11-02 09:25:21 -06:00
trace_msm_gpu_submit ( pid_nr ( pid ) , ring - > id , submitid ,
args - > nr_bos , args - > nr_cmds ) ;
2017-10-20 11:06:57 -06:00
2021-07-27 18:06:16 -07:00
ret = mutex_lock_interruptible ( & queue - > lock ) ;
2016-05-17 15:43:35 -04:00
if ( ret )
2020-01-24 00:57:10 +01:00
goto out_post_unlock ;
2016-02-03 13:24:35 -05:00
2016-06-16 16:43:49 -04:00
if ( args - > flags & MSM_SUBMIT_FENCE_FD_OUT ) {
out_fence_fd = get_unused_fd_flags ( O_CLOEXEC ) ;
if ( out_fence_fd < 0 ) {
ret = out_fence_fd ;
goto out_unlock ;
}
}
2020-08-17 15:01:36 -07:00
submit = submit_create ( dev , gpu , queue , args - > nr_bos ,
2019-05-07 12:02:07 -06:00
args - > nr_cmds ) ;
2021-07-27 18:06:13 -07:00
if ( IS_ERR ( submit ) ) {
ret = PTR_ERR ( submit ) ;
2021-11-18 15:50:30 +05:30
submit = NULL ;
2016-05-17 15:43:35 -04:00
goto out_unlock ;
}
2013-07-19 12:59:32 -04:00
2018-11-02 09:25:21 -06:00
submit - > pid = pid ;
submit - > ident = submitid ;
2017-12-13 15:12:57 -05:00
if ( args - > flags & MSM_SUBMIT_SUDO )
submit - > in_rb = true ;
2017-02-25 10:36:30 -05:00
if ( args - > flags & MSM_SUBMIT_FENCE_FD_IN ) {
2018-11-05 11:13:12 +01:00
struct dma_fence * in_fence ;
2017-02-25 10:36:30 -05:00
in_fence = sync_file_get_fence ( args - > fence_fd ) ;
2021-07-27 18:06:14 -07:00
if ( ! in_fence ) {
ret = - EINVAL ;
goto out_unlock ;
}
2018-11-05 11:13:12 +01:00
2021-08-05 12:46:57 +02:00
ret = drm_sched_job_add_dependency ( & submit - > base , in_fence ) ;
2018-11-05 11:13:12 +01:00
if ( ret )
2021-07-27 18:06:14 -07:00
goto out_unlock ;
2017-02-25 10:36:30 -05:00
}
2020-01-24 00:57:10 +01:00
if ( args - > flags & MSM_SUBMIT_SYNCOBJ_IN ) {
2021-07-27 18:06:14 -07:00
syncobjs_to_reset = msm_parse_deps ( submit , file ,
args - > in_syncobjs ,
args - > nr_in_syncobjs ,
args - > syncobj_stride , ring ) ;
if ( IS_ERR ( syncobjs_to_reset ) ) {
ret = PTR_ERR ( syncobjs_to_reset ) ;
goto out_unlock ;
}
2020-01-24 00:57:10 +01:00
}
if ( args - > flags & MSM_SUBMIT_SYNCOBJ_OUT ) {
post_deps = msm_parse_post_deps ( dev , file ,
args - > out_syncobjs ,
args - > nr_out_syncobjs ,
args - > syncobj_stride ) ;
if ( IS_ERR ( post_deps ) ) {
ret = PTR_ERR ( post_deps ) ;
2016-06-16 16:43:49 -04:00
goto out_unlock ;
}
}
2013-07-19 12:59:32 -04:00
ret = submit_lookup_objects ( submit , args , file ) ;
if ( ret )
2021-07-27 18:06:14 -07:00
goto out ;
2013-07-19 12:59:32 -04:00
2020-10-23 09:51:08 -07:00
ret = submit_lookup_cmds ( submit , args , file ) ;
if ( ret )
2021-07-27 18:06:14 -07:00
goto out ;
2020-10-23 09:51:08 -07:00
2019-11-20 11:56:07 +01:00
/* copy_*_user while holding a ww ticket upsets lockdep */
ww_acquire_init ( & submit - > ticket , & reservation_ww_class ) ;
has_ww_ticket = true ;
2016-03-14 13:56:37 -04:00
ret = submit_lock_objects ( submit ) ;
if ( ret )
goto out ;
2017-09-12 14:23:05 -04:00
ret = submit_fence_sync ( submit , ! ! ( args - > flags & MSM_SUBMIT_NO_IMPLICIT ) ) ;
if ( ret )
goto out ;
2016-03-15 18:26:28 -04:00
2016-03-14 13:56:37 -04:00
ret = submit_pin_objects ( submit ) ;
2013-07-19 12:59:32 -04:00
if ( ret )
goto out ;
for ( i = 0 ; i < args - > nr_cmds ; i + + ) {
struct msm_gem_object * msm_obj ;
2016-11-11 12:06:46 -05:00
uint64_t iova ;
2013-07-19 12:59:32 -04:00
2020-10-23 09:51:08 -07:00
ret = submit_bo ( submit , submit - > cmd [ i ] . idx ,
2013-07-19 12:59:32 -04:00
& msm_obj , & iova , NULL ) ;
if ( ret )
goto out ;
2020-10-23 09:51:08 -07:00
if ( ! submit - > cmd [ i ] . size | |
( ( submit - > cmd [ i ] . size + submit - > cmd [ i ] . offset ) >
msm_obj - > base . size / 4 ) ) {
DRM_ERROR ( " invalid cmdstream size: %u \n " , submit - > cmd [ i ] . size * 4 ) ;
2013-07-19 12:59:32 -04:00
ret = - EINVAL ;
goto out ;
}
2020-10-23 09:51:08 -07:00
submit - > cmd [ i ] . iova = iova + ( submit - > cmd [ i ] . offset * 4 ) ;
2013-07-19 12:59:32 -04:00
if ( submit - > valid )
continue ;
2020-10-23 09:51:08 -07:00
ret = submit_reloc ( submit , msm_obj , submit - > cmd [ i ] . offset * 4 ,
submit - > cmd [ i ] . nr_relocs , submit - > cmd [ i ] . relocs ) ;
2013-07-19 12:59:32 -04:00
if ( ret )
goto out ;
}
submit - > nr_cmds = i ;
2021-08-26 11:33:34 +02:00
drm_sched_job_arm ( & submit - > base ) ;
2021-07-27 18:06:14 -07:00
submit - > user_fence = dma_fence_get ( & submit - > base . s_fence - > finished ) ;
2016-06-16 16:37:38 -04:00
2021-07-27 18:06:12 -07:00
/*
* Allocate an id which can be used by WAIT_FENCE ioctl to map back
* to the underlying fence .
*/
submit - > fence_id = idr_alloc_cyclic ( & queue - > fence_idr ,
2021-07-27 18:06:14 -07:00
submit - > user_fence , 0 , INT_MAX , GFP_KERNEL ) ;
2021-07-27 18:06:12 -07:00
if ( submit - > fence_id < 0 ) {
ret = submit - > fence_id = 0 ;
submit - > fence_id = 0 ;
2016-06-16 16:37:38 -04:00
}
2021-08-26 11:33:34 +02:00
if ( ret = = 0 & & args - > flags & MSM_SUBMIT_FENCE_FD_OUT ) {
2021-07-27 18:06:14 -07:00
struct sync_file * sync_file = sync_file_create ( submit - > user_fence ) ;
2016-06-16 16:43:49 -04:00
if ( ! sync_file ) {
ret = - ENOMEM ;
2021-08-26 11:33:34 +02:00
} else {
fd_install ( out_fence_fd , sync_file - > file ) ;
args - > fence_fd = out_fence_fd ;
2016-06-16 16:43:49 -04:00
}
}
2021-07-27 18:06:11 -07:00
submit_attach_object_fences ( submit ) ;
2013-07-19 12:59:32 -04:00
2021-07-27 18:06:14 -07:00
/* The scheduler owns a ref now: */
msm_gem_submit_get ( submit ) ;
2013-07-19 12:59:32 -04:00
2021-08-05 12:46:50 +02:00
drm_sched_entity_push_job ( & submit - > base ) ;
2013-07-19 12:59:32 -04:00
2021-07-27 18:06:12 -07:00
args - > fence = submit - > fence_id ;
2021-11-11 11:24:56 -08:00
queue - > last_fence = submit - > fence_id ;
2016-06-16 16:43:49 -04:00
2020-01-24 00:57:10 +01:00
msm_reset_syncobjs ( syncobjs_to_reset , args - > nr_in_syncobjs ) ;
msm_process_post_deps ( post_deps , args - > nr_out_syncobjs ,
2021-07-27 18:06:14 -07:00
submit - > user_fence ) ;
2020-01-24 00:57:10 +01:00
2013-07-19 12:59:32 -04:00
out :
2021-07-27 18:06:11 -07:00
submit_cleanup ( submit , ! ! ret ) ;
2019-11-20 11:56:07 +01:00
if ( has_ww_ticket )
ww_acquire_fini ( & submit - > ticket ) ;
2016-05-17 15:43:35 -04:00
out_unlock :
2016-06-16 16:43:49 -04:00
if ( ret & & ( out_fence_fd > = 0 ) )
put_unused_fd ( out_fence_fd ) ;
2021-07-27 18:06:16 -07:00
mutex_unlock ( & queue - > lock ) ;
if ( submit )
msm_gem_submit_put ( submit ) ;
2020-01-24 00:57:10 +01:00
out_post_unlock :
if ( ! IS_ERR_OR_NULL ( post_deps ) ) {
for ( i = 0 ; i < args - > nr_out_syncobjs ; + + i ) {
kfree ( post_deps [ i ] . chain ) ;
drm_syncobj_put ( post_deps [ i ] . syncobj ) ;
}
kfree ( post_deps ) ;
}
if ( ! IS_ERR_OR_NULL ( syncobjs_to_reset ) ) {
for ( i = 0 ; i < args - > nr_in_syncobjs ; + + i ) {
if ( syncobjs_to_reset [ i ] )
drm_syncobj_put ( syncobjs_to_reset [ i ] ) ;
}
kfree ( syncobjs_to_reset ) ;
}
2013-07-19 12:59:32 -04:00
return ret ;
}