drm/xe: Do not access xe file when updating exec queue run_ticks
The current code is running into a use-after-free case where the xe file is closed before the exec queue run_ticks can be updated. This occurs in the xe_file_close path.

To fix that, do not access the xe file when updating the exec queue run_ticks. Instead, store the exec queue run_ticks locally in the exec queue object and accumulate it when the user dumps the drm client stats. We know that the xe file is valid when the user is dumping the run_ticks for the drm client, so this effectively removes the dependency on the xe file object in xe_exec_queue_update_run_ticks().

v2:
- Fix the accumulation of q->run_ticks delta into xe file run_ticks
- s/runtime/run_ticks/ (Rodrigo)

Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/1908
Fixes: 6109f24f87d7 ("drm/xe: Add helper to accumulate exec queue runtime")
Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240524234744.1352543-2-umesh.nerlige.ramappa@intel.com
This commit is contained in:
parent
45bb564de0
commit
ce62827bc2
@@ -251,8 +251,11 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)

 	/* Accumulate all the exec queues from this client */
 	mutex_lock(&xef->exec_queue.lock);
-	xa_for_each(&xef->exec_queue.xa, i, q)
+	xa_for_each(&xef->exec_queue.xa, i, q) {
 		xe_exec_queue_update_run_ticks(q);
+		xef->run_ticks[q->class] += q->run_ticks - q->old_run_ticks;
+		q->old_run_ticks = q->run_ticks;
+	}
 	mutex_unlock(&xef->exec_queue.lock);

 	/* Get the total GPU cycles */
@@ -760,7 +760,6 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
  */
 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 {
-	struct xe_file *xef;
 	struct xe_lrc *lrc;
 	u32 old_ts, new_ts;

@@ -772,8 +771,6 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 	if (!q->vm || !q->vm->xef)
 		return;

-	xef = q->vm->xef;
-
 	/*
 	 * Only sample the first LRC. For parallel submission, all of them are
 	 * scheduled together and we compensate that below by multiplying by
@@ -784,7 +781,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 	 */
 	lrc = &q->lrc[0];
 	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
-	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+	q->run_ticks += (new_ts - old_ts) * q->width;
 }

 void xe_exec_queue_kill(struct xe_exec_queue *q)
|
@@ -141,6 +141,10 @@ struct xe_exec_queue {
 	 * Protected by @vm's resv. Unused if @vm == NULL.
 	 */
 	u64 tlb_flush_seqno;
+	/** @old_run_ticks: prior hw engine class run time in ticks for this exec queue */
+	u64 old_run_ticks;
+	/** @run_ticks: hw engine class run time in ticks for this exec queue */
+	u64 run_ticks;
 	/** @lrc: logical ring context for this exec queue */
 	struct xe_lrc lrc[];
 };
|
Loading…
x
Reference in New Issue
Block a user