/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words, we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
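
/*
 * Example (illustrative sketch, not part of this header's API contract): a
 * driver that wants to force a CPU round trip before dependent jobs run
 * would typically set this flag on the scheduler fence it hands out. The
 * "job" variable below is a hypothetical &struct drm_sched_job pointer:
 *
 *	struct dma_fence *fence = &job->s_fence->finished;
 *
 *	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
 */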

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1
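
/*
 * Illustrative sketch: code that needs to branch on the active policy simply
 * compares this module-level knob against the two values above, e.g. to keep
 * the FIFO tree up to date when a job is pushed (the "entity" and "ts"
 * variables below are placeholders, not part of this header):
 *
 *	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 *		drm_sched_rq_update_fifo(entity, ts);
 */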

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has
	 * been set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler	**sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int			num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
	 */
	enum drm_sched_priority		priority;

	/**
	 * @rq_lock:
	 *
	 * Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t			rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct
	 * locking, this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity. The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job
	 * queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty:
	 *
	 * Points to entities' guilty.
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence		*last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when entity is not in use, used to sequence entity cleanup
	 * in drm_sched_entity_fini().
	 */
	struct completion		entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Marks earliest job waiting in SW queue
	 */
	ktime_t				oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into time based priority queue
	 */
	struct rb_node			rb_tree_node;

};
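
/*
 * Example (illustrative sketch only): a driver typically embeds one
 * &struct drm_sched_entity per context and initializes it against one or
 * more schedulers. The "ctx" and "ring" objects below are hypothetical:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_entity_destroy(&ctx->entity);
 */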

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs to.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
	struct rb_root_cached		rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs to.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};
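
/*
 * Example (illustrative sketch only): as the @finished documentation above
 * notes, a driver that needs an out fence before run_job() has produced a
 * hardware fence can take a reference on the finished fence of its job once
 * the job has been initialized and armed. The "submit" object below, which
 * embeds a &struct drm_sched_job as "job", is hypothetical:
 *
 *	drm_sched_job_arm(&submit->job);
 *	submit->out_fence = dma_fence_get(&submit->job.s_fence->finished);
 */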

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule job kill to different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job; see the example sketch after the structure below.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb	finish_cb;
		struct work_struct	work;
	};

	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;

	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t				submit_ts;
};
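
/*
 * Example (illustrative sketch only): the usual job lifecycle in a driver's
 * submit path, assuming a hypothetical "submit" object that embeds a
 * &struct drm_sched_job as "base" and waits on one in-fence:
 *
 *	ret = drm_sched_job_init(&submit->base, &ctx->entity, submit);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_dependency(&submit->base, in_fence);
 *	if (ret) {
 *		drm_sched_job_cleanup(&submit->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&submit->base);
 *	out_fence = dma_fence_get(&submit->base.s_fence->finished);
 *	drm_sched_entity_push_job(&submit->base);
 */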

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented on the driver side. A minimal
 * example sketch follows the structure definition below.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next,
	 * to get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation to the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue
	 * 2. Try to gracefully stop non-faulty jobs (optional)
	 * 3. Issue a GPU reset (driver-specific)
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs()
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handler of the different &drm_gpu_scheduler. One way to
	 * achieve this synchronization is to create an ordered workqueue
	 * (using alloc_ordered_workqueue()) at the driver level, and pass this
	 * queue to drm_sched_init(), to guarantee that timeout handlers are
	 * executed sequentially. The above workflow needs to be slightly
	 * adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional)
	 * 3. Issue a GPU reset on all faulty queues (driver-specific)
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs()
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start()
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
	 * and the underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
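
/*
 * Example (illustrative sketch only): a minimal driver-side ops table. The
 * "my_*" callbacks, the "to_my_job()" wrapper, "my_hw_submit()" and
 * "my_gpu_reset()" helpers are all hypothetical; the timeout handler follows
 * the single-queue recovery workflow documented for @timedout_job above.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = to_my_job(sched_job);
 *
 *		return my_hw_submit(job);	(returns the hardware fence)
 *	}
 *
 *	static enum drm_gpu_sched_stat
 *	my_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *		drm_sched_stop(sched, sched_job);
 *		my_gpu_reset(sched);
 *		drm_sched_resubmit_jobs(sched);
 *		drm_sched_start(sched, true);
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_my_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */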

/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the hangs by a job crosses this limit then it is marked
 *              guilty and it will no longer be considered for scheduling.
 * @score: score to help loadbalancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct workqueue_struct		*timeout_wq;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			*score;
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
	struct device			*dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);
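
/*
 * Example (illustrative sketch only): initializing one scheduler per hardware
 * ring, reusing the hypothetical "my_sched_ops" from the sketch above. The
 * submission depth (16), hang limit (2), timeout and the "ring"/"dev"
 * objects are made-up placeholders:
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     16, 2, msecs_to_jiffies(500),
 *			     NULL, NULL, ring->name, dev);
 *	if (ret)
 *		return ret;
 *	...
 *	drm_sched_fini(&ring->sched);
 */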

void drm_sched_fini(struct drm_gpu_scheduler *sched);

int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif