/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
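
/*
 * amdgpu_sched_run_job - scheduler run_job callback
 *
 * Submits the job's indirect buffers to the hardware ring and returns a
 * reference to the fence of the last IB, or NULL on error.  On failure the
 * job is handed back to process_job so its resources are still released.
 */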
static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
					  struct amd_sched_entity *entity,
					  struct amd_sched_job *job)
{
	int r = 0;
	struct amdgpu_job *sched_job;
	struct amdgpu_fence *fence;

	if (!job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	sched_job = (struct amdgpu_job *)job;
	mutex_lock(&sched_job->job_lock);
	r = amdgpu_ib_schedule(sched_job->adev,
			       sched_job->num_ibs,
			       sched_job->ibs,
			       sched_job->owner);
	if (r)
		goto err;
	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);

	mutex_unlock(&sched_job->job_lock);
	return &fence->base;

err:
	DRM_ERROR("Run job error\n");
	mutex_unlock(&sched_job->job_lock);
	sched->ops->process_job(sched, (struct amd_sched_job *)sched_job);
	return NULL;
}
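
/*
 * amdgpu_sched_process_job - scheduler process_job callback
 *
 * Runs the job's free_job callback (if any), then drops the scheduler
 * fence reference and frees the job itself.
 */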
static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
				     struct amd_sched_job *job)
{
	struct amdgpu_job *sched_job;

	if (!job) {
		DRM_ERROR("job is null\n");
		return;
	}
	sched_job = (struct amdgpu_job *)job;
	mutex_lock(&sched_job->job_lock);
	if (sched_job->free_job)
		sched_job->free_job(sched_job);
	mutex_unlock(&sched_job->job_lock);
	/* after processing job, free memory */
	fence_put(&sched_job->base.s_fence->base);
	kfree(sched_job);
}
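
/* Backend callbacks amdgpu registers with the GPU scheduler */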
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.run_job = amdgpu_sched_run_job,
	.process_job = amdgpu_sched_process_job
};
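
/*
 * amdgpu_sched_ib_submit_kernel_helper - submit IBs from a kernel queue
 *
 * If the GPU scheduler is enabled, wraps the IBs in an amdgpu_job and
 * pushes it to the ring's kernel scheduler entity; otherwise the IBs are
 * scheduled on the hardware directly.  In both cases *f receives a fence
 * reference the caller can wait on.  Returns 0 on success or a negative
 * error code.
 */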
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_job *),
					 void *owner,
					 struct fence **f)
{
	int r = 0;

	if (amdgpu_enable_scheduler) {
		struct amdgpu_job *job =
			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job)
			return -ENOMEM;
		job->base.sched = ring->scheduler;
		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
		job->adev = adev;
		job->ibs = ibs;
		job->num_ibs = num_ibs;
		job->owner = owner;
		mutex_init(&job->job_lock);
		job->free_job = free_job;
		mutex_lock(&job->job_lock);
		r = amd_sched_push_job((struct amd_sched_job *)job);
		if (r) {
			mutex_unlock(&job->job_lock);
			kfree(job);
			return r;
		}
		ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
		*f = fence_get(&job->base.s_fence->base);
		mutex_unlock(&job->job_lock);
	} else {
		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
		if (r)
			return r;
		*f = fence_get(&ibs[num_ibs - 1].fence->base);
	}
	return 0;
}
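
/*
 * Illustrative usage sketch (not part of this file): a kernel-internal
 * submission path would typically call the helper with a single IB and a
 * free_job callback, then wait on the returned fence.  my_free_job_cb and
 * the surrounding context below are hypothetical.
 *
 *	struct fence *f;
 *	int r;
 *
 *	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
 *						 my_free_job_cb, owner, &f);
 *	if (!r) {
 *		fence_wait(f, false);
 *		fence_put(f);
 *	}
 */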