/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH_SW,
        DRM_SCHED_PRIORITY_HIGH_HW,
        DRM_SCHED_PRIORITY_KERNEL,
        DRM_SCHED_PRIORITY_MAX,
        DRM_SCHED_PRIORITY_INVALID = -1,
        DRM_SCHED_PRIORITY_UNSET = -2
};
/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @rq_list: a list of run queues on which jobs from this entity can
 *           be scheduled.
 * @num_rq_list: number of run queues in the rq_list.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each new
 *             &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong to
 *                 this entity. The &drm_sched_fence.scheduled fence uses
 *                 fence_context, while &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top of
 *              the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to the ctx's guilty flag.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: marks the entity as removed from the rq and destined for
 *           termination.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
        struct list_head                list;
        struct drm_sched_rq             *rq;
        struct drm_sched_rq             **rq_list;
        unsigned int                    num_rq_list;
        spinlock_t                      rq_lock;

        struct spsc_queue               job_queue;

        atomic_t                        fence_seq;
        uint64_t                        fence_context;

        struct dma_fence                *dependency;
        struct dma_fence_cb             cb;
        atomic_t                        *guilty;
        struct dma_fence                *last_scheduled;
        struct task_struct              *last_user;
        bool                            stopped;
};
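
/*
 * Example (illustrative sketch only, not part of this API): a driver would
 * typically create one entity per context or file descriptor and point it
 * at the run queues of the schedulers it may submit to.  "my_device" and
 * "my_ctx" below are hypothetical driver-side names.
 *
 *      struct drm_sched_rq *rq_list[] = {
 *              &my_device->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
 *      };
 *
 *      r = drm_sched_entity_init(&my_ctx->entity, rq_list,
 *                                ARRAY_SIZE(rq_list), NULL);
 */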
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
        spinlock_t                      lock;
        struct drm_gpu_scheduler        *sched;
        struct list_head                entities;
        struct drm_sched_entity         *current_entity;
};
/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
        /**
         * @scheduled: this fence is what will be signaled by the scheduler
         * when the job is scheduled.
         */
        struct dma_fence                scheduled;

        /**
         * @finished: this fence is what will be signaled by the scheduler
         * when the job is completed.
         *
         * When setting up an out fence for the job, you should use
         * this, since it's available immediately upon
         * drm_sched_job_init(), and the fence returned by the driver
         * from run_job() won't be created until the dependencies have
         * resolved.
         */
        struct dma_fence                finished;

        /**
         * @parent: the fence returned by &drm_sched_backend_ops.run_job
         * when scheduling the job on hardware. We signal the
         * &drm_sched_fence.finished fence once parent is signalled.
         */
        struct dma_fence                *parent;

        /**
         * @sched: the scheduler instance to which the job having this
         * struct belongs.
         */
        struct drm_gpu_scheduler        *sched;

        /**
         * @lock: the lock used by the scheduled and the finished fences.
         */
        spinlock_t                      lock;

        /**
         * @owner: job owner for debugging.
         */
        void                            *owner;
};
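
/*
 * Example (illustrative sketch only): when handing an out fence back to
 * user space, &drm_sched_fence.finished is the fence to use, because it
 * exists as soon as drm_sched_job_init() has returned.  "my_job" (which is
 * assumed to embed a struct drm_sched_job named "base") and "my_sync_file"
 * are hypothetical driver-side names.
 *
 *      struct dma_fence *out_fence =
 *              dma_fence_get(&my_job->base.s_fence->finished);
 *      my_sync_file = sync_file_create(out_fence);
 */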
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @finish_work: schedules the function @drm_sched_job_finish once the job has
 *               finished to remove the job from the
 *               @drm_gpu_scheduler.ring_mirror_list.
 * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the
 * driver should call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job.
 */
struct drm_sched_job {
        struct spsc_node                queue_node;
        struct drm_gpu_scheduler        *sched;
        struct drm_sched_fence          *s_fence;
        struct dma_fence_cb             finish_cb;
        struct work_struct              finish_work;
        struct list_head                node;
        uint64_t                        id;
        atomic_t                        karma;
        enum drm_sched_priority         s_priority;
        struct drm_sched_entity         *entity;
        struct dma_fence_cb             cb;
};
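
/*
 * Example (illustrative sketch only): the usual submission flow a driver
 * builds on top of this structure.  "my_job", "my_entity", "my_owner" and
 * "my_fill_commands()" are hypothetical driver-side names.
 *
 *      r = drm_sched_job_init(&my_job->base, my_entity, my_owner);
 *      if (r)
 *              goto err_free;
 *
 *      my_fill_commands(my_job);       (driver-specific setup)
 *
 *      drm_sched_entity_push_job(&my_job->base, my_entity);
 */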
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
                                            int threshold)
{
        return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}
/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
        /**
         * @dependency: Called when the scheduler is considering scheduling
         * this job next, to get another struct dma_fence for this job to
         * block on. Once it returns NULL, run_job() may be called.
         */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);

        /**
         * @run_job: Called to execute the job once all of the dependencies
         * have been resolved. This may be called multiple times, if
         * timedout_job() has happened and drm_sched_job_recovery()
         * decides to try it again.
         */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

        /**
         * @timedout_job: Called when a job has taken too long to execute,
         * to trigger GPU recovery.
         */
        void (*timedout_job)(struct drm_sched_job *sched_job);

        /**
         * @free_job: Called once the job's finished fence has been signaled
         * and it's time to clean it up.
         */
        void (*free_job)(struct drm_sched_job *sched_job);
};
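
/*
 * Example (illustrative sketch only): a minimal driver-side ops table.
 * The callback implementations ("my_dependency" and so on) are
 * hypothetical and would be provided by the driver.
 *
 *      static const struct drm_sched_backend_ops my_sched_ops = {
 *              .dependency   = my_dependency,
 *              .run_job      = my_run_job,
 *              .timedout_job = my_timedout_job,
 *              .free_job     = my_free_job,
 *      };
 */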
/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs caused by a job cross this limit, the job is
 *              marked guilty and will not be considered for scheduling further.
 * @num_jobs: the number of jobs currently queued in the scheduler.
 * @ready: marks if the underlying HW is ready to work.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
        const struct drm_sched_backend_ops      *ops;
        uint32_t                        hw_submission_limit;
        long                            timeout;
        const char                      *name;
        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_MAX];
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
        atomic64_t                      job_id_count;
        struct delayed_work             work_tdr;
        struct task_struct              *thread;
        struct list_head                ring_mirror_list;
        spinlock_t                      job_list_lock;
        int                             hang_limit;
        atomic_t                        num_jobs;
        bool                            ready;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit, long timeout,
                   const char *name);
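
/*
 * Example (illustrative sketch only): initializing one scheduler per
 * hardware ring.  The submission limit (64), hang limit (3), timeout and
 * "my_ring"/"my_sched_ops" names are hypothetical driver choices.
 *
 *      r = drm_sched_init(&my_ring->sched, &my_sched_ops,
 *                         64, 3, msecs_to_jiffies(10000),
 *                         my_ring->name);
 */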
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
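
/*
 * Example (illustrative sketch only): one possible recovery flow a driver
 * could run after timedout_job() fires, assuming the hardware is reset in
 * between.  "my_ring", "my_device", "bad_job" and "my_hw_reset()" are
 * hypothetical driver-side names; the exact ordering is a driver decision.
 *
 *      drm_sched_stop(&my_ring->sched);
 *      if (bad_job)
 *              drm_sched_increase_karma(bad_job);
 *      my_hw_reset(my_device);
 *      drm_sched_resubmit_jobs(&my_ring->sched);
 *      drm_sched_start(&my_ring->sched, true);
 */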
bool drm_sched_dependency_optimized(struct dma_fence *fence,
                                    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                             struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
                          struct drm_sched_rq **rq_list,
                          unsigned int num_rq_list,
                          atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
        struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
                              unsigned long remaining);

#endif