/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
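
/*
 * Set up a freshly allocated context: take an initial reference on it,
 * allocate amdgpu_sched_jobs fence slots for every ring and create one
 * scheduler entity per ring. On failure the entities created so far and
 * the fence array are torn down again.
 */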
static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			break;
	}

	if (i < adev->num_rings) {
		for (j = 0; j < i; j++)
			amd_sched_entity_fini(&adev->rings[j]->sched,
					      &ctx->rings[j].entity);
		kfree(ctx->fences);
		return r;
	}
	return 0;
}
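
/*
 * Tear down a context: drop the remaining per-ring fences, free the fence
 * array and destroy the scheduler entities created in amdgpu_ctx_init().
 */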
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);
}
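
/*
 * Allocate a new context for a file private, reserve an IDR handle for it
 * under the manager lock and initialize it; the handle is returned in *id.
 */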
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}
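
/* kref release callback, called once the last reference to a context is gone */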
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	amdgpu_ctx_fini(ctx);
	kfree(ctx);
}
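
/* Remove the IDR handle of a context and drop the reference it held. */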
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}
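
/*
 * Report the context state back to user space, in particular whether a GPU
 * reset happened since the previous query (tracked via gpu_reset_counter).
 */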
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
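
/* Dispatcher for the context ioctl: alloc, free and query-state operations. */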
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
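
/* Look up a context by handle and take an additional reference on it. */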
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
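
/* Drop a reference obtained with amdgpu_ctx_get(). */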
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
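
/*
 * Remember a fence in the per-ring ring buffer of the context and return the
 * sequence number assigned to it. The slot is chosen as the sequence number
 * masked with (amdgpu_sched_jobs - 1), so the fence previously stored in that
 * slot is waited on before it is replaced.
 */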
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}
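
/*
 * Look up the fence belonging to a sequence number: returns -EINVAL for
 * sequence numbers that have not been emitted yet, NULL when the fence has
 * already dropped out of the ring buffer, otherwise a new reference to the
 * stored fence.
 */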
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}
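
/*
 * Tear down a context manager: release any contexts that were not freed by
 * user space and complain if somebody still holds an extra reference.
 */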
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}