/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	= 1,
	[AMDGPU_HW_IP_COMPUTE]	= 4,
	[AMDGPU_HW_IP_DMA]	= 2,
	[AMDGPU_HW_IP_UVD]	= 1,
	[AMDGPU_HW_IP_VCE]	= 1,
	[AMDGPU_HW_IP_UVD_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_DEC]	= 1,
	[AMDGPU_HW_IP_VCN_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_JPEG]	= 1,
};

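/*
 * Check whether the calling client may create a context at @priority:
 * NORMAL and below are open to everyone, higher priorities additionally
 * require CAP_SYS_NICE or DRM master status.
 */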
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
{
	switch (prio) {
	case DRM_SCHED_PRIORITY_HIGH:
	case DRM_SCHED_PRIORITY_KERNEL:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

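/*
 * Map a DRM scheduler priority to a hardware ring priority for @hw_ip,
 * falling back to AMDGPU_RING_PRIO_DEFAULT when no scheduler is registered
 * for the requested priority level.
 */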
static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
						enum drm_sched_priority prio,
						u32 hw_ip)
{
	unsigned int hw_prio;

	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
			amdgpu_ctx_sched_prio_to_compute_prio(prio) :
			AMDGPU_RING_PRIO_DEFAULT;
	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

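/*
 * Lazily allocate and initialize the drm_sched_entity (and its fence ring)
 * for the given hardware IP and ring within @ctx.
 */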
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	unsigned int hw_prio;
	enum drm_sched_priority priority;
	int r;

	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	entity->sequence = 1;
	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	ctx->entities[hw_ip][ring] = entity;
	return 0;

error_free_entity:
	kfree(entity);

	return r;
}

static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	return 0;
}

static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

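/*
 * Look up (and create on first use) the scheduler entity for the given
 * hardware IP, instance and ring. Returns it through @entity, or a negative
 * error code if the IP type, instance or ring index is invalid.
 */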
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

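/*
 * Legacy AMDGPU_CTX_OP_QUERY_STATE handler: reports whether a GPU reset
 * occurred since the last query on this context.
 */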
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

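/*
 * AMDGPU_CTX_OP_QUERY_STATE2 handler: reports reset, VRAM-lost and guilty
 * status as flags, plus whether the RAS uncorrectable/correctable error
 * counters advanced since the previous query.
 */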
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query ue count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* ras counter is monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query ce count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

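/*
 * DRM_IOCTL_AMDGPU_CTX entry point: dispatches the ALLOC_CTX, FREE_CTX,
 * QUERY_STATE and QUERY_STATE2 operations for the calling file descriptor.
 */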
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	r = amdgpu_to_sched_priority(args->in.priority, &priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (r == -EINVAL)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

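/*
 * Publish @fence in the entity's fence ring and hand its sequence number
 * back through @handle. The ring slot being recycled must already hold a
 * signaled fence.
 */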
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

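/*
 * Look up the fence for sequence number @seq on @entity (~0ull means the
 * most recent submission). Returns a reference to the fence, NULL if it has
 * already left the ring, or ERR_PTR(-EINVAL) for a not-yet-used sequence.
 */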
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   enum drm_sched_priority priority)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity, priority);

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
		hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
						      AMDGPU_HW_IP_COMPUTE);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

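/*
 * Apply @priority as an override for every entity already created in @ctx;
 * DRM_SCHED_PRIORITY_UNSET falls back to the context's initial priority.
 */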
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	enum drm_sched_priority ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

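/*
 * Wait for the fence stored in the ring slot that the next submission on
 * @entity will reuse, so an unsignaled fence is never overwritten.
 */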
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

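/*
 * Flush all scheduler entities of every context in @mgr, consuming the
 * remaining @timeout. Returns the time left.
 */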
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

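/*
 * Tear down the scheduler entities of every context in @mgr. Contexts that
 * still hold extra references are reported and skipped.
 */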
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}