drm/xe: prefer xe_bo_create_pin_map()
With small-bar we likely want to annotate all the kernel users that require CPU access with vram. If xe_bo_create_pin_map() is the central place for that then we should have a central place to annotate. This also simplifies the code and fixes what appears to be a double xe_bo_put(hwe->hwsp) in the error handling.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
This commit is contained in:
parent
90385dcfc0
commit
e103c45f50
@@ -310,24 +310,14 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
 	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
 	xe_reg_sr_apply_whitelist(&hwe->reg_whitelist, hwe->mmio_base, gt);

-	hwe->hwsp = xe_bo_create_locked(xe, gt, NULL, SZ_4K, ttm_bo_type_kernel,
-					XE_BO_CREATE_VRAM_IF_DGFX(gt) |
-					XE_BO_CREATE_GGTT_BIT);
+	hwe->hwsp = xe_bo_create_pin_map(xe, gt, NULL, SZ_4K, ttm_bo_type_kernel,
+					 XE_BO_CREATE_VRAM_IF_DGFX(gt) |
+					 XE_BO_CREATE_GGTT_BIT);
 	if (IS_ERR(hwe->hwsp)) {
 		err = PTR_ERR(hwe->hwsp);
 		goto err_name;
 	}

-	err = xe_bo_pin(hwe->hwsp);
-	if (err)
-		goto err_unlock_put_hwsp;
-
-	err = xe_bo_vmap(hwe->hwsp);
-	if (err)
-		goto err_unpin_hwsp;
-
-	xe_bo_unlock_no_vm(hwe->hwsp);
-
 	err = xe_lrc_init(&hwe->kernel_lrc, hwe, NULL, NULL, SZ_16K);
 	if (err)
 		goto err_hwsp;
@@ -353,15 +343,10 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,

 	return 0;

-err_unpin_hwsp:
-	xe_bo_unpin(hwe->hwsp);
-err_unlock_put_hwsp:
-	xe_bo_unlock_no_vm(hwe->hwsp);
-	xe_bo_put(hwe->hwsp);
 err_kernel_lrc:
 	xe_lrc_finish(&hwe->kernel_lrc);
 err_hwsp:
-	xe_bo_put(hwe->hwsp);
+	xe_bo_unpin_map_no_vm(hwe->hwsp);
 err_name:
 	hwe->name = NULL;
@@ -615,7 +615,11 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,

 	lrc->flags = 0;

-	lrc->bo = xe_bo_create_locked(xe, hwe->gt, vm,
+	/*
+	 * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
+	 * via VM bind calls.
+	 */
+	lrc->bo = xe_bo_create_pin_map(xe, hwe->gt, vm,
 				      ring_size + xe_lrc_size(xe, hwe->class),
 				      ttm_bo_type_kernel,
 				      XE_BO_CREATE_VRAM_IF_DGFX(hwe->gt) |
@@ -628,21 +632,6 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
 	else
 		lrc->full_gt = hwe->gt;

-	/*
-	 * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
-	 * via VM bind calls.
-	 */
-	err = xe_bo_pin(lrc->bo);
-	if (err)
-		goto err_unlock_put_bo;
-	lrc->flags |= XE_LRC_PINNED;
-
-	err = xe_bo_vmap(lrc->bo);
-	if (err)
-		goto err_unpin_bo;
-
-	xe_bo_unlock_vm_held(lrc->bo);
-
 	lrc->ring.size = ring_size;
 	lrc->ring.tail = 0;
@@ -652,8 +641,8 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
 	if (!gt->default_lrc[hwe->class]) {
 		init_data = empty_lrc_data(hwe);
 		if (!init_data) {
-			xe_lrc_finish(lrc);
-			return -ENOMEM;
+			err = -ENOMEM;
+			goto err_lrc_finish;
 		}
 	}
@@ -710,12 +699,8 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,

 	return 0;

-err_unpin_bo:
-	if (lrc->flags & XE_LRC_PINNED)
-		xe_bo_unpin(lrc->bo);
-err_unlock_put_bo:
-	xe_bo_unlock_vm_held(lrc->bo);
-	xe_bo_put(lrc->bo);
+err_lrc_finish:
+	xe_lrc_finish(lrc);
 	return err;
 }
@@ -724,17 +709,15 @@ void xe_lrc_finish(struct xe_lrc *lrc)
 	struct ww_acquire_ctx ww;

 	xe_hw_fence_ctx_finish(&lrc->fence_ctx);
-	if (lrc->flags & XE_LRC_PINNED) {
-		if (lrc->bo->vm)
-			xe_vm_lock(lrc->bo->vm, &ww, 0, false);
-		else
-			xe_bo_lock_no_vm(lrc->bo, NULL);
-		xe_bo_unpin(lrc->bo);
-		if (lrc->bo->vm)
-			xe_vm_unlock(lrc->bo->vm, &ww);
-		else
-			xe_bo_unlock_no_vm(lrc->bo);
-	}
+	if (lrc->bo->vm)
+		xe_vm_lock(lrc->bo->vm, &ww, 0, false);
+	else
+		xe_bo_lock_no_vm(lrc->bo, NULL);
+	xe_bo_unpin(lrc->bo);
+	if (lrc->bo->vm)
+		xe_vm_unlock(lrc->bo->vm, &ww);
+	else
+		xe_bo_unlock_no_vm(lrc->bo);
 	xe_bo_put(lrc->bo);
 }
@@ -25,7 +25,6 @@ struct xe_lrc {

 	/** @flags: LRC flags */
 	u32 flags;
-#define XE_LRC_PINNED BIT(1)

 	/** @ring: submission ring state */
 	struct {
Loading…
x
Reference in New Issue
Block a user