/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select the next entity from a run queue using round robin policy.
 * It may return the same entity as the current one if that is the only
 * one available in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (!kfifo_is_empty(&entity->job_queue)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return rq->current_entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (!kfifo_is_empty(&entity->job_queue)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return rq->current_entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Initialize a context entity which the scheduler uses to submit to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * return 0 on success, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	init_waitqueue_head(&entity->wait_queue);
	entity->fence_context = fence_context_alloc(1);
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	atomic_set(&entity->fence_seq, 0);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}
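
/*
 * Usage sketch (illustrative only, kept as a comment so it is not compiled):
 * a driver-side context would typically pair amd_sched_entity_init() with
 * amd_sched_entity_fini(). The names ring and MY_MAX_JOBS below are
 * hypothetical placeholders, not symbols defined by this scheduler.
 *
 *	struct amd_sched_entity entity;
 *	int r;
 *
 *	r = amd_sched_entity_init(ring->scheduler, &entity,
 *				  &ring->scheduler->sched_rq, MY_MAX_JOBS);
 *	if (r)
 *		return r;
 *
 *	(submit jobs through amd_sched_entity_push_job(), then tear down)
 *
 *	amd_sched_entity_fini(ring->scheduler, &entity);
 */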

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->belongto_rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, so consume
	 * the existing queued IBs.
	 */
	wait_event(entity->wait_queue, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

/**
 * Helper to submit a job to the job queue
 *
 * @job		The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *job)
{
	struct amd_sched_entity *entity = job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(job->sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);
	int r;

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	r = wait_event_interruptible(entity->wait_queue,
				     amd_sched_entity_in(sched_job));

	return r;
}
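
/*
 * Usage sketch (illustrative only, kept as a comment so it is not compiled):
 * a caller fills in the amd_sched_job fields this file relies on and then
 * pushes the job. The job allocation and the owner pointer are assumed to be
 * provided by the driver and are hypothetical here.
 *
 *	job->s_entity = entity;
 *	job->sched = entity->scheduler;
 *	job->owner = owner;
 *
 *	r = amd_sched_entity_push_job(job);
 *	if (r)
 *		return r;	(interrupted while waiting for queue space)
 */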

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wait_queue);
}

/**
 * Select the next entity containing real IB submissions
 */
static struct amd_sched_entity *
amd_sched_select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *tmp;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (tmp == NULL)
		tmp = amd_sched_rq_select_entity(&sched->sched_rq);

	return tmp;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	struct amd_gpu_scheduler *sched;

	sched = sched_job->sched;
	amd_sched_fence_signal(sched_job->s_fence);
	atomic_dec(&sched->hw_rq_count);
	fence_put(&sched_job->s_fence->base);
	sched->ops->process_job(sched, sched_job);
	wake_up_interruptible(&sched->wait_queue);
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *c_entity = NULL;
		struct amd_sched_job *job;
		struct fence *fence;

		wait_event_interruptible(sched->wait_queue,
			kthread_should_stop() ||
			(c_entity = amd_sched_select_context(sched)));

		if (!c_entity)
			continue;

		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;

		atomic_inc(&sched->hw_rq_count);

		fence = sched->ops->run_job(sched, c_entity, job);
		if (fence) {
			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		}

		wake_up(&c_entity->wait_queue);
	}
	return 0;
}

/**
 * Create a gpu scheduler
 *
 * @ops			The backend operations for this scheduler.
 * @ring		The ring id for the scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @priv		Driver private data stored in the scheduler.
 *
 * Return the pointer to the scheduler on success, otherwise return NULL
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
					   unsigned ring, unsigned hw_submission,
					   void *priv)
{
	struct amd_gpu_scheduler *sched;

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	sched->priv = priv;
	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wait_queue);
	atomic_set(&sched->hw_rq_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
		kfree(sched);
		return NULL;
	}

	return sched;
}
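
/*
 * Usage sketch (illustrative only, kept as a comment so it is not compiled):
 * creating and tearing down a scheduler instance. The callbacks my_run_job
 * and my_process_job, the ring_id and my_priv values, and the assumption that
 * amd_sched_backend_ops only needs the run_job and process_job hooks used in
 * this file are all hypothetical placeholders.
 *
 *	static struct amd_sched_backend_ops my_ops = {
 *		.run_job	= my_run_job,
 *		.process_job	= my_process_job,
 *	};
 *
 *	struct amd_gpu_scheduler *sched;
 *
 *	sched = amd_sched_create(&my_ops, ring_id, 2, my_priv);
 *	if (!sched)
 *		return -ENOMEM;
 *
 *	(... submit work via entities, then ...)
 *
 *	amd_sched_destroy(sched);
 */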

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 *
 * return 0 on success, -1 on failure.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}