drm/xe: Skip VMAs pin when requesting signal to the last XE_EXEC
Doing an XE_EXEC with num_batch_buffer == 0 causes the signals passed as arguments to be signaled when the last real XE_EXEC is completed. But to do that it was first pinning all VMAs in drm_gpuvm_exec_lock(); this patch removes that pinning, as it is not required. This change also helps Mesa implement memory over-committing recovery: Mesa needs to unbind unneeded VMAs when the whole VM can't fit in GPU memory, but it can only do the unbinding once the last XE_EXEC is completed. So with this change Mesa can get the signal it wants without getting out-of-memory errors. Fixes: eb9702ad2986 ("drm/xe: Allow num_batch_buffer / num_binds == 0 in IOCTLs") Cc: Thomas Hellstrom <thomas.hellstrom@linux.intel.com> Co-developed-by: Matthew Brost <matthew.brost@intel.com> Signed-off-by: José Roberto de Souza <jose.souza@intel.com> Reviewed-by: Matthew Brost <matthew.brost@intel.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20240313171318.121066-1-jose.souza@intel.com (cherry picked from commit 58480c1c912ff8146d067301a0d04cca318b4a66) Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
This commit is contained in:
parent
d58b4ef63b
commit
dd8a07f06d
@@ -235,6 +235,29 @@ retry:
|
||||
goto err_unlock_list;
|
||||
}
|
||||
|
||||
if (!args->num_batch_buffer) {
|
||||
err = xe_vm_lock(vm, true);
|
||||
if (err)
|
||||
goto err_unlock_list;
|
||||
|
||||
if (!xe_vm_in_lr_mode(vm)) {
|
||||
struct dma_fence *fence;
|
||||
|
||||
fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
|
||||
if (IS_ERR(fence)) {
|
||||
err = PTR_ERR(fence);
|
||||
goto err_unlock_list;
|
||||
}
|
||||
for (i = 0; i < num_syncs; i++)
|
||||
xe_sync_entry_signal(&syncs[i], NULL, fence);
|
||||
xe_exec_queue_last_fence_set(q, vm, fence);
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
|
||||
xe_vm_unlock(vm);
|
||||
goto err_unlock_list;
|
||||
}
|
||||
|
||||
vm_exec.vm = &vm->gpuvm;
|
||||
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
|
||||
if (xe_vm_in_lr_mode(vm)) {
|
||||
@@ -254,24 +277,6 @@ retry:
|
||||
goto err_exec;
|
||||
}
|
||||
|
||||
if (!args->num_batch_buffer) {
|
||||
if (!xe_vm_in_lr_mode(vm)) {
|
||||
struct dma_fence *fence;
|
||||
|
||||
fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
|
||||
if (IS_ERR(fence)) {
|
||||
err = PTR_ERR(fence);
|
||||
goto err_exec;
|
||||
}
|
||||
for (i = 0; i < num_syncs; i++)
|
||||
xe_sync_entry_signal(&syncs[i], NULL, fence);
|
||||
xe_exec_queue_last_fence_set(q, vm, fence);
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
|
||||
goto err_exec;
|
||||
}
|
||||
|
||||
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
|
||||
err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
|
||||
skip_retry = true;
|
||||
|
Loading…
x
Reference in New Issue
Block a user