/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
			fence_put(ctx->rings[i].fences[j]);

	kfree(ctx);
}
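
/*
 * Allocate a new context, insert it into the file's context handle IDR and
 * return the resulting handle through @id.
 */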
int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		     uint32_t *id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	int i, r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	memset(ctx, 0, sizeof(*ctx));
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ctx->rings[i].sequence = 1;
	mutex_unlock(&mgr->lock);

	return 0;
}
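
/*
 * Remove the context identified by @id from the IDR and drop the reference
 * held by the handle; returns -EINVAL if no such context exists.
 */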
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}
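
/*
 * Fill in the CTX query ioctl output: flags and hangs are currently always
 * zero, and the reset status reports whether a GPU reset has occurred since
 * the previous query of this context.
 */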
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	unsigned reset_counter;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
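
/*
 * Called when the file is released: drop the handle reference of every
 * context still in the IDR, then destroy the manager's IDR and lock.
 */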
void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
{
	struct idr *idp;
	struct amdgpu_ctx *ctx;
	uint32_t id;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}
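
/*
 * DRM_AMDGPU_CTX ioctl entry point: dispatch to the alloc, free and query
 * helpers above based on the requested operation.
 */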
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(adev, fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
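
/*
 * Look up a context by handle and take a reference on it; a successful
 * lookup must be balanced with amdgpu_ctx_put().
 */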
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
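
/* Drop a reference previously taken with amdgpu_ctx_get(). */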
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
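
/*
 * Store @fence in the ring's fixed-size fence window and return the sequence
 * number assigned to it. If the slot about to be reused still holds an older
 * fence, wait for that fence first, so at most AMDGPU_CTX_MAX_CS_PENDING
 * submissions are tracked per ring.
 */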
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
	struct fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}
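
/*
 * Return a new reference to the fence that was assigned @seq, NULL if @seq
 * is older than the fence window (its slot has already been reused), or
 * ERR_PTR(-EINVAL) if @seq has not been submitted yet.
 */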
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);
	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}