/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	= 1,
	[AMDGPU_HW_IP_COMPUTE]	= 4,
	[AMDGPU_HW_IP_DMA]	= 2,
	[AMDGPU_HW_IP_UVD]	= 1,
	[AMDGPU_HW_IP_VCE]	= 1,
	[AMDGPU_HW_IP_UVD_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_DEC]	= 1,
	[AMDGPU_HW_IP_VCN_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_JPEG]	= 1,
};

static int amdgpu_ctx_total_num_entities(void)
{
	unsigned i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}

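/*
 * Check whether the caller may create a context at the requested scheduler
 * priority: NORMAL and below are open to everyone, higher priorities require
 * CAP_SYS_NICE or DRM master status.
 */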
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

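/*
 * Initialize a context: allocate the per-entity fence ring buffers, set up
 * the reset/VRAM-lost counters and priorities, and create one scheduler
 * entity for every ring exposed by each hardware IP block.
 */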
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	unsigned i, j, k;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;

	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	ctx->entities[0] = kcalloc(num_entities,
				   sizeof(struct amdgpu_ctx_entity),
				   GFP_KERNEL);
	if (!ctx->entities[0]) {
		r = -ENOMEM;
		goto error_free_fences;
	}

	for (i = 0; i < num_entities; ++i) {
		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

		entity->sequence = 1;
		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
		ctx->entities[i] = ctx->entities[i - 1] +
				   amdgpu_ctx_num_entities[i - 1];

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

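	/*
	 * For each HW IP block, collect the run queues of its usable rings
	 * at the requested priority and initialize the scheduler entities
	 * that feed them.
	 */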
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
		unsigned num_rings = 0;
		unsigned num_rqs = 0;

		switch (i) {
		case AMDGPU_HW_IP_GFX:
			rings[0] = &adev->gfx.gfx_ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
				rings[j] = &adev->gfx.compute_ring[j];
			num_rings = adev->gfx.num_compute_rings;
			break;
		case AMDGPU_HW_IP_DMA:
			for (j = 0; j < adev->sdma.num_instances; ++j)
				rings[j] = &adev->sdma.instance[j].ring;
			num_rings = adev->sdma.num_instances;
			break;
		case AMDGPU_HW_IP_UVD:
			rings[0] = &adev->uvd.inst[0].ring;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			rings[0] = &adev->vce.ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			rings[0] = &adev->uvd.inst[0].ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
				if (adev->vcn.harvest_config & (1 << j))
					continue;
				rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
			}
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
				if (adev->vcn.harvest_config & (1 << j))
					continue;
				for (k = 0; k < adev->vcn.num_enc_rings; ++k)
					rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
			}
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
				if (adev->vcn.harvest_config & (1 << j))
					continue;
				rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg;
			}
			break;
		}

		for (j = 0; j < num_rings; ++j) {
			if (!rings[j]->adev)
				continue;

			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
		}

		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
						  rqs, num_rqs, &ctx->guilty);
		if (r)
			goto error_cleanup_entities;
	}

	return 0;

error_cleanup_entities:
	for (i = 0; i < num_entities; ++i)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
	kfree(ctx->entities[0]);

error_free_fences:
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

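/*
 * Final teardown of a context: drop the fences still held in the per-entity
 * ring buffers and free the fence array, the entity array and the context
 * itself. The scheduler entities are destroyed separately (see
 * amdgpu_ctx_do_release() and amdgpu_ctx_mgr_entity_fini()).
 */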
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

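/**
 * amdgpu_ctx_get_entity - look up the scheduler entity for a ring
 * @ctx: context to search in
 * @hw_ip: hardware IP type (AMDGPU_HW_IP_*)
 * @instance: IP instance, currently only 0 is valid
 * @ring: ring index within the IP type
 * @entity: returned scheduler entity
 *
 * Returns 0 on success or -EINVAL if the IP type, instance or ring index is
 * out of range.
 */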
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}

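/*
 * Allocate and initialize a new context and publish it in the file-private
 * context IDR; the new handle is returned through @id.
 */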
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

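/*
 * kref release callback: destroy all scheduler entities of the context
 * before handing it to amdgpu_ctx_fini().
 */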
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned num_entities;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	num_entities = 0;
	for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
		num_entities += amdgpu_ctx_num_entities[i];

	for (i = 0; i < num_entities; i++)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);

	amdgpu_ctx_fini(ref);
}

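/*
 * Remove the context from the handle IDR and drop the reference the handle
 * held on it.
 */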
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

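/*
 * AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset occurred since the
 * last query of this context.
 */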
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

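/*
 * AMDGPU_CTX_OP_QUERY_STATE2: report GPU reset, VRAM lost, guilty and RAS
 * error-counter changes as flag bits in out->state.flags.
 */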
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	uint32_t ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query ue count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* ras counter is monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query ce count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

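/**
 * amdgpu_ctx_ioctl - dispatch the DRM_AMDGPU_CTX ioctl
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_ctx)
 * @filp: DRM file private
 *
 * Handles context allocation, destruction and the two state queries.
 */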
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

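/**
 * amdgpu_ctx_add_fence - store a submission fence in the entity's ring buffer
 * @ctx: context the entity belongs to
 * @entity: scheduler entity the job was pushed to
 * @fence: fence of the submitted job
 * @handle: optionally returns the sequence number assigned to the fence
 *
 * The fence takes over the oldest slot in the ring buffer; the fence
 * previously stored in that slot must already be signaled.
 */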
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

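/**
 * amdgpu_ctx_get_fence - look up a submission fence by sequence number
 * @ctx: context the entity belongs to
 * @entity: scheduler entity the job was pushed to
 * @seq: sequence number, or ~0ull for the most recent submission
 *
 * Returns a reference to the fence, NULL if the fence is old enough to have
 * already been overwritten in the ring buffer, or ERR_PTR(-EINVAL) if @seq
 * has not been used yet.
 */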
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

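/**
 * amdgpu_ctx_priority_override - override the context priority
 * @ctx: context to modify
 * @priority: new override priority, or DRM_SCHED_PRIORITY_UNSET to fall back
 *            to the priority the context was created with
 *
 * Applies the effective priority to every scheduler entity of the context.
 */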
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	enum drm_sched_priority ctx_prio;
	unsigned i;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < num_entities; i++) {
		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

		drm_sched_entity_set_priority(entity, ctx_prio);
	}
}

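/*
 * Wait for the fence occupying the ring-buffer slot that the next submission
 * on @entity will reuse, which limits the number of outstanding submissions
 * per entity to amdgpu_sched_jobs.
 */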
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = centity->fences[idx];

	if (other) {
		signed long r;

		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

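/**
 * amdgpu_ctx_mgr_entity_flush - flush all entities of all contexts
 * @mgr: context manager to flush
 * @timeout: remaining wait budget in jiffies
 *
 * Flushes every scheduler entity of every context managed by @mgr and
 * returns the wait budget left over after the flushes.
 */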
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			timeout = drm_sched_entity_flush(entity, timeout);
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

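/**
 * amdgpu_ctx_mgr_entity_fini - tear down the entities of idle contexts
 * @mgr: context manager to clean up
 *
 * Finalizes the scheduler entities of every context whose only remaining
 * reference is the one held by the manager; contexts that are still
 * referenced elsewhere are reported and skipped.
 */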
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;
	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}

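/**
 * amdgpu_ctx_mgr_fini - destroy the context manager
 * @mgr: context manager to destroy
 *
 * Tears down the remaining entities via amdgpu_ctx_mgr_entity_fini(), drops
 * the reference held on every context and destroys the IDR and lock.
 * Contexts still holding extra references are reported with DRM_ERROR.
 */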
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;
	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}