drm/i915/gvt: Move request alloc to dispatch_workload path only
Previously, performance was improved by auditing and shadowing the
workload ahead of vGPU scheduling. However, if submit_context allocates
further requests before the previous request has been added, the
timeline will already hold a later seqno than that of the pending
request.

This patch moves the request allocation into the dispatch_workload
path, the same place where the request is added.

This fixes the kernel BUG triggered by the
(timeline->seqno != request->fence.seqno) check in add_request.
Fixes: 89ea20b930 ("drm/i915/gvt: Factor out scan and shadow from workload dispatch")
Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
commit f2880e04f3
parent b2d6ef7061
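For context, below is a minimal userspace model of the failure mode
described in the commit message. This is a hedged sketch, not the i915
implementation: the struct names and the request_alloc()/add_request()
helpers are stand-ins for the real timeline bookkeeping, and the assert
only models the GEM_BUG_ON-style seqno check in add_request.

#include <assert.h>

/* Toy model: a timeline hands out monotonically increasing seqnos. */
struct timeline { unsigned int seqno; };
struct request { unsigned int fence_seqno; };

/* Allocating a request reserves the timeline's next seqno. */
static void request_alloc(struct timeline *tl, struct request *rq)
{
	rq->fence_seqno = ++tl->seqno;
}

/* Adding a request requires it to be the latest allocation. */
static void add_request(struct timeline *tl, struct request *rq)
{
	assert(tl->seqno == rq->fence_seqno); /* models the failing check */
}

int main(void)
{
	struct timeline tl = { 0 };
	struct request rq1, rq2;

	request_alloc(&tl, &rq1); /* early alloc on the shadow path */
	request_alloc(&tl, &rq2); /* next submit allocates again...  */
	add_request(&tl, &rq1);   /* ...before rq1 is added: BUG     */
	return 0;
}

With the patch, intel_gvt_generate_request() runs on the dispatch path
(called from prepare_workload()), so a request is allocated and added
on the same path and no second allocation can slip in between.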
@@ -270,7 +270,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ring_id = workload->ring_id;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
 	struct intel_ring *ring;
 	int ret;
 
@@ -315,6 +314,27 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto err_unpin;
+	workload->shadowed = true;
+	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+	return ret;
+}
+
+static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct drm_i915_gem_request *rq;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	int ret;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
@@ -329,14 +349,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
 		goto err_unpin;
-	workload->shadowed = true;
 	return 0;
 
 err_unpin:
 	engine->context_unpin(engine, shadow_ctx);
-err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
 	return ret;
 }
 
@@ -496,6 +513,12 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		goto err_unpin_mm;
 	}
 
+	ret = intel_gvt_generate_request(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to generate request\n");
+		goto err_unpin_mm;
+	}
+
 	ret = prepare_shadow_batch_buffer(workload);
 	if (ret) {
 		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");