commit 341f708158
Merge tag 'drm-xe-next-fixes-2024-03-14' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

Driver changes:
- Invalidate userptr VMA on page pin fault, allowing userspace to free userptr while still having bindings
- Fail early on sysfs file creation error
- Skip VMA pinning on xe_exec with num_batch_buffer == 0

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/c4epi2j6anpc77z73zbgibxg7bxsmmkb522aa7tyei6oa6uunn@3oad4cgomd5a
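The last bullet is visible from userspace: after this merge, an exec ioctl that carries no batch buffer never reaches the VMA-pinning path and can be used purely to order and signal syncs. Below is a rough userspace-side sketch of such a call. DRM_IOCTL_XE_EXEC, struct drm_xe_exec and struct drm_xe_sync are the xe uAPI as I recall it from xe_drm.h; the exact field names and header paths are assumptions and should be checked against the headers you actually build with.

/*
 * Rough sketch only: a batchless exec used as a sync point.
 * Struct/field names follow my reading of the xe uAPI and may need
 * adjusting; this is not code from the driver or its tests.
 */
#include <stdint.h>
#include <sys/ioctl.h>

#include <xf86drm.h>      /* drmIoctl(), from libdrm */
#include <drm/xe_drm.h>   /* header path may differ per install */

static int xe_exec_sync_only(int fd, uint32_t exec_queue_id,
                             uint32_t out_syncobj)
{
        struct drm_xe_sync sync = {
                .type   = DRM_XE_SYNC_TYPE_SYNCOBJ,
                .flags  = DRM_XE_SYNC_FLAG_SIGNAL,
                .handle = out_syncobj,
        };
        struct drm_xe_exec exec = {
                .exec_queue_id    = exec_queue_id,
                .num_syncs        = 1,
                .syncs            = (uintptr_t)&sync,
                .address          = 0,   /* no batch buffer address */
                .num_batch_buffer = 0,   /* exercises the new early-out path */
        };

        /* Signals out_syncobj once prior work on the queue has completed. */
        return drmIoctl(fd, DRM_IOCTL_XE_EXEC, &exec);
}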
@@ -235,6 +235,29 @@ retry:
                 goto err_unlock_list;
         }
 
+        if (!args->num_batch_buffer) {
+                err = xe_vm_lock(vm, true);
+                if (err)
+                        goto err_unlock_list;
+
+                if (!xe_vm_in_lr_mode(vm)) {
+                        struct dma_fence *fence;
+
+                        fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
+                        if (IS_ERR(fence)) {
+                                err = PTR_ERR(fence);
+                                goto err_unlock_list;
+                        }
+                        for (i = 0; i < num_syncs; i++)
+                                xe_sync_entry_signal(&syncs[i], NULL, fence);
+                        xe_exec_queue_last_fence_set(q, vm, fence);
+                        dma_fence_put(fence);
+                }
+
+                xe_vm_unlock(vm);
+                goto err_unlock_list;
+        }
+
         vm_exec.vm = &vm->gpuvm;
         vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
         if (xe_vm_in_lr_mode(vm)) {

@@ -254,24 +277,6 @@ retry:
                 goto err_exec;
         }
 
-        if (!args->num_batch_buffer) {
-                if (!xe_vm_in_lr_mode(vm)) {
-                        struct dma_fence *fence;
-
-                        fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
-                        if (IS_ERR(fence)) {
-                                err = PTR_ERR(fence);
-                                goto err_exec;
-                        }
-                        for (i = 0; i < num_syncs; i++)
-                                xe_sync_entry_signal(&syncs[i], NULL, fence);
-                        xe_exec_queue_last_fence_set(q, vm, fence);
-                        dma_fence_put(fence);
-                }
-
-                goto err_exec;
-        }
-
         if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
                 err = -EWOULDBLOCK;     /* Aliased to -EAGAIN */
                 skip_retry = true;
@@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
 static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 {
         return BIT(tile->id) & vma->tile_present &&
-                !(BIT(tile->id) & vma->usm.tile_invalidated);
+                !(BIT(tile->id) & vma->tile_invalidated);
 }
 
 static bool vma_matches(struct xe_vma *vma, u64 page_addr)

@@ -226,7 +226,7 @@ retry_userptr:
 
         if (xe_vma_is_userptr(vma))
                 ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
-        vma->usm.tile_invalidated &= ~BIT(tile->id);
+        vma->tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
         drm_exec_fini(&exec);
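These two hunks retarget the fault path at the relocated bitmask: vma->usm.tile_invalidated becomes vma->tile_invalidated, with one bit per tile id. The sketch below (standalone, simplified types, not the driver's structs) is only meant to illustrate the discipline the diff relies on: the invalidate path sets the per-tile bits, the page-fault handler clears the bit for the tile it just revalidated, and validity checks require present-and-not-invalidated.

/* Illustrative sketch with simplified types; mirrors how the hunks above
 * use tile_present / tile_invalidated, nothing more.
 */
#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

struct demo_vma {
        uint8_t tile_present;     /* tiles that currently hold a binding */
        uint8_t tile_invalidated; /* tiles whose binding was invalidated */
};

static void demo_invalidate_all(struct demo_vma *vma, uint8_t tile_mask)
{
        vma->tile_invalidated = tile_mask;       /* cf. xe_vm_invalidate_vma() */
}

static void demo_fault_revalidate(struct demo_vma *vma, unsigned int tile_id)
{
        vma->tile_invalidated &= ~BIT(tile_id);  /* fault handler re-arms tile */
}

static bool demo_vma_is_valid(const struct demo_vma *vma, unsigned int tile_id)
{
        return (vma->tile_present & BIT(tile_id)) &&
               !(vma->tile_invalidated & BIT(tile_id));
}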
@@ -464,7 +464,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
              TP_ARGS(vma)
 );
 
-DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
              TP_PROTO(struct xe_vma *vma),
              TP_ARGS(vma)
 );
@@ -708,6 +708,7 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
         int err = 0;
         LIST_HEAD(tmp_evict);
 
+        xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
         lockdep_assert_held_write(&vm->lock);
 
         /* Collect invalidated userptrs */

@@ -724,11 +725,27 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
         list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
                                  userptr.repin_link) {
                 err = xe_vma_userptr_pin_pages(uvma);
-                if (err < 0)
-                        return err;
+                if (err == -EFAULT) {
+                        list_del_init(&uvma->userptr.repin_link);
 
-                list_del_init(&uvma->userptr.repin_link);
-                list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
+                        /* Wait for pending binds */
+                        xe_vm_lock(vm, false);
+                        dma_resv_wait_timeout(xe_vm_resv(vm),
+                                              DMA_RESV_USAGE_BOOKKEEP,
+                                              false, MAX_SCHEDULE_TIMEOUT);
+
+                        err = xe_vm_invalidate_vma(&uvma->vma);
+                        xe_vm_unlock(vm);
+                        if (err)
+                                return err;
+                } else {
+                        if (err < 0)
+                                return err;
+
+                        list_del_init(&uvma->userptr.repin_link);
+                        list_move_tail(&uvma->vma.combined_links.rebind,
+                                       &vm->rebind_list);
+                }
         }
 
         return 0;

@@ -1987,7 +2004,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
                         return err;
         }
 
-        if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+        if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
                 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
                                   true, first_op, last_op);
         } else {

@@ -3185,9 +3202,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
         u8 id;
         int ret;
 
-        xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
         xe_assert(xe, !xe_vma_is_null(vma));
-        trace_xe_vma_usm_invalidate(vma);
+        trace_xe_vma_invalidate(vma);
 
         /* Check that we don't race with page-table updates */
         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {

@@ -3225,7 +3241,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
                 }
         }
 
-        vma->usm.tile_invalidated = vma->tile_mask;
+        vma->tile_invalidated = vma->tile_mask;
 
         return 0;
 }
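The xe_vm_userptr_pin() hunk carries the headline change: when pinning the userptr pages fails with -EFAULT (for example because userspace already freed or unmapped the range), the VMA is no longer queued for rebind; instead the VM waits for pending binds on its reservation object and then invalidates the GPU binding, which is what allows userspace to free a userptr while bindings still exist. The helper below is a minimal sketch of that wait-then-invalidate ordering only; dma_resv_wait_timeout() and DMA_RESV_USAGE_BOOKKEEP are the real kernel APIs, while the callback-based wrapper and its name are purely illustrative.

/* Minimal sketch of "flush pending binds, then tear down the binding".
 * Not driver code: the invalidate callback stands in for xe_vm_invalidate_vma().
 */
#include <linux/dma-resv.h>
#include <linux/sched.h>

static int demo_wait_binds_then_invalidate(struct dma_resv *resv,
                                           int (*invalidate)(void *priv),
                                           void *priv)
{
        long timeout;

        /* Block until every fence, including bookkeep-only fences such as
         * pending page-table binds, has signalled.
         */
        timeout = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
                                        false, MAX_SCHEDULE_TIMEOUT);
        if (timeout < 0)
                return (int)timeout;

        /* Only now is it safe to zap the GPU binding. */
        return invalidate(priv);
}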
@@ -83,11 +83,8 @@ struct xe_vma {
                 struct work_struct destroy_work;
         };
 
-        /** @usm: unified shared memory state */
-        struct {
-                /** @tile_invalidated: VMA has been invalidated */
-                u8 tile_invalidated;
-        } usm;
+        /** @tile_invalidated: VMA has been invalidated */
+        u8 tile_invalidated;
 
         /** @tile_mask: Tile mask of where to create binding for this VMA */
         u8 tile_mask;
@@ -111,8 +111,10 @@ void xe_vram_freq_sysfs_init(struct xe_tile *tile)
                 return;
 
         kobj = kobject_create_and_add("memory", tile->sysfs);
-        if (!kobj)
+        if (!kobj) {
+                drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
                 return;
+        }
 
         err = sysfs_create_group(kobj, &freq_group_attrs);
         if (err) {
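This last hunk is the "fail early on sysfs file creation error" item: a failed kobject_create_and_add() now emits a drm_warn() and returns instead of being silently ignored. For reference, a generic fail-early version of the same pattern might look like the sketch below; kobject_create_and_add(), sysfs_create_group() and kobject_put() are stock kernel APIs, while the function name, the placeholder attribute group and the error-returning signature are illustrative rather than what the (void-returning) driver function does.

/* Illustrative fail-early sysfs setup; names are placeholders. */
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static int demo_sysfs_init(struct kobject *parent,
                           const struct attribute_group *demo_group)
{
        struct kobject *kobj;
        int err;

        kobj = kobject_create_and_add("memory", parent);
        if (!kobj)
                return -ENOMEM;         /* report instead of continuing */

        err = sysfs_create_group(kobj, demo_group);
        if (err) {
                kobject_put(kobj);      /* undo the directory on failure */
                return err;
        }

        return 0;
}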