/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

        DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
                  job->base.sched->name,
                  atomic_read(&job->ring->fence_drv.last_seq),
                  job->ring->fence_drv.sync_seq);

        if (amdgpu_sriov_vf(job->adev))
                amdgpu_sriov_gpu_reset(job->adev, job);
        else
                amdgpu_gpu_reset(job->adev);
}
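
/*
 * Allocate a job together with storage for @num_ibs IBs in a single
 * kzalloc'd block; the IB array lives directly behind the job structure.
 * Also sets up the two sync objects used for dependency tracking.
 */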
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        (*job)->adev = adev;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);

        return 0;
}
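
/*
 * Convenience wrapper: allocate a job with a single IB of @size bytes
 * already backed by IB pool memory. If amdgpu_ib_get() fails, the
 * half-constructed job is freed again and the error is returned.
 *
 * A minimal usage sketch (illustrative only; "ring", "entity", "owner"
 * and the IB size are assumed to come from the caller's context):
 *
 *      struct amdgpu_job *job;
 *      struct dma_fence *f;
 *      int r;
 *
 *      r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 *      if (r)
 *              return r;
 *      // ... emit commands into job->ibs[0] ...
 *      r = amdgpu_job_submit(job, ring, entity, owner, &f);
 *      if (r)
 *              amdgpu_job_free(job);
 */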
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
        if (r)
                kfree(*job);

        return r;
}
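
/*
 * Release the IBs belonging to a job. The scheduler's "finished" fence is
 * preferred as the fence the IB memory is protected by; the hardware fence
 * is only used as a fallback when no scheduler fence exists.
 */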
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
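
/*
 * Scheduler free callback: invoked once the scheduler is done with the job.
 * Drops the hardware fence reference, tears down both sync objects and
 * frees the job itself. The IBs have already been released by this point.
 */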
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}
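
/*
 * Free a job that was never handed to the scheduler; used on error paths
 * before amdgpu_job_submit() succeeds. Unlike amdgpu_job_free_cb() this
 * also has to release the IBs itself.
 */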
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}
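
/*
 * Hand a job over to the scheduler. On success the job is owned by the
 * scheduler and the caller must not free it; *f receives a reference to
 * the scheduler's "finished" fence. The IB suballocations can be returned
 * to the pool right away because they are fenced against that same
 * finished fence and are not reused before it signals.
 */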
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct dma_fence **f)
{
        int r;

        job->ring = ring;

        if (!f)
                return -EINVAL;

        r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
        if (r)
                return r;

        job->owner = owner;
        job->fence_ctx = entity->fence_context;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        amd_sched_entity_push_job(&job->base);

        return 0;
}
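
/*
 * Scheduler dependency callback: return the next fence this job has to
 * wait for, or NULL once all dependencies are satisfied. For jobs that
 * still need a VM ID, keep grabbing one (which may add further fences to
 * the sync object) until no dependency is left. Fences the scheduler has
 * optimized away are remembered in sched_sync so that amdgpu_ib_schedule()
 * can still decide whether a pipeline sync is needed.
 */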
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;

        struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
        int r;

        while (fence == NULL && vm && !job->vm_id) {
                struct amdgpu_ring *ring = job->ring;

                r = amdgpu_vm_grab_id(vm, ring, &job->sync,
                                      &job->base.s_fence->finished,
                                      job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync);
        }

        if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
                r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
                if (r)
                        DRM_ERROR("Error adding fence to sync (%d)\n", r);
        }

        return fence;
}
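
/*
 * Scheduler run callback: all dependencies have signaled, so push the
 * job's IBs to the ring. The resulting hardware fence is stashed in
 * job->fence (replacing any previous one when a job is re-run after a
 * GPU reset) and also returned to the scheduler, which uses it to signal
 * the "finished" fence.
 */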
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_job *job;
        int r;

        if (!sched_job) {
                DRM_ERROR("job is null\n");
                return NULL;
        }
        job = to_amdgpu_job(sched_job);

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);
        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
        if (r)
                DRM_ERROR("Error scheduling IBs (%d)\n", r);

        /* if gpu reset, hw fence will be replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);

        amdgpu_job_free_resources(job);
        return fence;
}
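
/* Backend callbacks implemented by amdgpu for the GPU scheduler. */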
const struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};