drm/scheduler: Fix hang when sched_entity released
Problem: If the scheduler is already stopped by the time a sched_entity is released and the entity's job_queue is not empty, I encountered a hang in drm_sched_entity_flush. This is because drm_sched_entity_is_idle never becomes true. Fix: In drm_sched_fini detach all sched_entities from the scheduler's run queues. This will satisfy drm_sched_entity_is_idle. Also wake up all those processes stuck in sched_entity flushing, since the scheduler main thread which would normally wake them up is stopped by now. v2: Reverse the order of drm_sched_rq_remove_entity and marking s_entity as stopped, to prevent reinsertion back into the rq due to a race. v3: Drop drm_sched_rq_remove_entity; only modify entity->stopped and check for it in drm_sched_entity_is_idle. Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com> Link: https://patchwork.freedesktop.org/patch/msgid/20210512142648.666476-14-andrey.grodzovsky@amd.com
This commit is contained in:
parent
54a85db8de
commit
c61cdbdbff
@ -116,7 +116,8 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
|
|||||||
rmb(); /* for list_empty to work without lock */
|
rmb(); /* for list_empty to work without lock */
|
||||||
|
|
||||||
if (list_empty(&entity->list) ||
|
if (list_empty(&entity->list) ||
|
||||||
spsc_queue_count(&entity->job_queue) == 0)
|
spsc_queue_count(&entity->job_queue) == 0 ||
|
||||||
|
entity->stopped)
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
|
@ -898,9 +898,33 @@ EXPORT_SYMBOL(drm_sched_init);
|
|||||||
*/
|
*/
|
||||||
void drm_sched_fini(struct drm_gpu_scheduler *sched)
|
void drm_sched_fini(struct drm_gpu_scheduler *sched)
|
||||||
{
|
{
|
||||||
|
struct drm_sched_entity *s_entity;
|
||||||
|
int i;
|
||||||
|
|
||||||
if (sched->thread)
|
if (sched->thread)
|
||||||
kthread_stop(sched->thread);
|
kthread_stop(sched->thread);
|
||||||
|
|
||||||
|
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
|
||||||
|
struct drm_sched_rq *rq = &sched->sched_rq[i];
|
||||||
|
|
||||||
|
if (!rq)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
spin_lock(&rq->lock);
|
||||||
|
list_for_each_entry(s_entity, &rq->entities, list)
|
||||||
|
/*
|
||||||
|
* Prevents reinsertion and marks job_queue as idle,
|
||||||
|
* it will be removed from rq in drm_sched_entity_fini
|
||||||
|
* eventually
|
||||||
|
*/
|
||||||
|
s_entity->stopped = true;
|
||||||
|
spin_unlock(&rq->lock);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
|
||||||
|
wake_up_all(&sched->job_scheduled);
|
||||||
|
|
||||||
/* Confirm no work left behind accessing device structures */
|
/* Confirm no work left behind accessing device structures */
|
||||||
cancel_delayed_work_sync(&sched->work_tdr);
|
cancel_delayed_work_sync(&sched->work_tdr);
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user