drm/xe: Move ufence add to vm_bind_ioctl_ops_fini
Rather than adding a ufence to a VMA in the bind function, add the ufence to all VMAs in the IOCTL that require binds in vm_bind_ioctl_ops_fini. This helps with the transition to 1 job per VM bind IOCTL.

v2:
- Rebase
v3:
- Fix typo in commit (Oak)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-12-matthew.brost@intel.com
commit 5aa5eea09a
parent fda75ef80b
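For context, the user fence (ufence) this patch re-plumbs is the sync object userspace attaches to a VM bind so the kernel writes a value to user memory when the bind completes. Below is a minimal, hypothetical userspace sketch of such a bind; it is not part of this patch, the uAPI field and flag usage is only assumed from include/uapi/drm/xe_drm.h, and error handling plus VM/queue setup are omitted.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static uint64_t bind_done;	/* kernel writes here when the bind completes */

/* Hypothetical helper: map one BO and attach a user-fence completion sync. */
static int bind_with_ufence(int fd, uint32_t vm_id, uint32_t bo_handle,
			    uint64_t gpu_addr, uint64_t size)
{
	struct drm_xe_sync sync = {
		.type = DRM_XE_SYNC_TYPE_USER_FENCE,
		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
		.addr = (uintptr_t)&bind_done,
		.timeline_value = 1,		/* value the kernel will write */
	};
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.bind = {
			.obj = bo_handle,
			.range = size,
			.addr = gpu_addr,
			.op = DRM_XE_VM_BIND_OP_MAP,
		},
		.num_syncs = 1,
		.syncs = (uintptr_t)&sync,
	};

	/* Caller later waits for bind_done == 1, e.g. via DRM_IOCTL_XE_WAIT_USER_FENCE. */
	return drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}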
@@ -338,6 +338,21 @@ err_out:
 	return ERR_PTR(-ENOMEM);
 }
 
+/**
+ * __xe_sync_ufence_get() - Get user fence from user fence
+ * @ufence: input user fence
+ *
+ * Get a user fence reference from user fence
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence)
+{
+	user_fence_get(ufence);
+
+	return ufence;
+}
+
 /**
  * xe_sync_ufence_get() - Get user fence from sync
  * @sync: input sync
@@ -37,6 +37,7 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
 	return !!sync->ufence;
 }
 
+struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence);
 struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
 void xe_sync_ufence_put(struct xe_user_fence *ufence);
 int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
@@ -1798,17 +1798,10 @@ xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
 {
 	struct dma_fence *fence;
 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-	struct xe_user_fence *ufence;
 
 	xe_vm_assert_held(vm);
 	xe_bo_assert_held(bo);
 
-	ufence = find_ufence_get(syncs, num_syncs);
-	if (vma->ufence && ufence)
-		xe_sync_ufence_put(vma->ufence);
-
-	vma->ufence = ufence ?: vma->ufence;
-
 	if (immediate) {
 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
 				       first_op, last_op);
@@ -2817,20 +2810,57 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 	return fence;
 }
 
+static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
+{
+	if (vma->ufence)
+		xe_sync_ufence_put(vma->ufence);
+	vma->ufence = __xe_sync_ufence_get(ufence);
+}
+
+static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
+			  struct xe_user_fence *ufence)
+{
+	switch (op->base.op) {
+	case DRM_GPUVA_OP_MAP:
+		vma_add_ufence(op->map.vma, ufence);
+		break;
+	case DRM_GPUVA_OP_REMAP:
+		if (op->remap.prev)
+			vma_add_ufence(op->remap.prev, ufence);
+		if (op->remap.next)
+			vma_add_ufence(op->remap.next, ufence);
+		break;
+	case DRM_GPUVA_OP_UNMAP:
+		break;
+	case DRM_GPUVA_OP_PREFETCH:
+		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
+		break;
+	default:
+		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+	}
+}
+
 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
 				   struct dma_fence *fence)
 {
 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
+	struct xe_user_fence *ufence;
 	struct xe_vma_op *op;
 	int i;
 
+	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
 	list_for_each_entry(op, &vops->list, link) {
+		if (ufence)
+			op_add_ufence(vm, op, ufence);
+
 		if (op->base.op == DRM_GPUVA_OP_UNMAP)
 			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
 		else if (op->base.op == DRM_GPUVA_OP_REMAP)
 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
 				       fence);
 	}
+	if (ufence)
+		xe_sync_ufence_put(ufence);
 	for (i = 0; i < vops->num_syncs; i++)
 		xe_sync_entry_signal(vops->syncs + i, fence);
 	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
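For readers tracking the reference counting after this change, here is a short comment-only sketch of the flow; it only summarizes the hunks above and adds no new code.

/*
 * vm_bind_ioctl_ops_fini()
 *   ufence = find_ufence_get(vops->syncs, vops->num_syncs)  // +1, IOCTL-scope ref
 *   list_for_each_entry(op, &vops->list, link)
 *     op_add_ufence(vm, op, ufence)
 *       vma_add_ufence(vma, ufence)
 *         xe_sync_ufence_put(vma->ufence)                   // drop any previous VMA ufence
 *         vma->ufence = __xe_sync_ufence_get(ufence)        // +1 per VMA that binds
 *   xe_sync_ufence_put(ufence)                              // drop the IOCTL-scope ref
 *
 * Each VMA keeps its own reference until the VMA is destroyed or a later bind
 * replaces the ufence, so the fence lifetime is unchanged; only the point
 * where the reference is taken has moved out of xe_vm_bind().
 */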