drm/xe: Avoid doing rebinds

If we don't change page sizes we can avoid doing rebinds and instead just
do a partial unbind. The algorithm to determine a VMA's page size is
greedy: we assume all pages in the removed VMA use the largest page size
found anywhere in the VMA.

v2: Don't exceed 100 lines
v3: struct xe_vma_op_unmap removed in a different patch, remove XXX comment

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
commit 8f33b4f054
parent 3188c0f4c8
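The core check is easy to see in isolation: a surviving piece of a split VMA only needs a rebind if the split boundary falls inside one of the VMA's large pages. Below is a minimal standalone sketch of that test; the helper name, parameters, and sizes are illustrative assumptions, not the driver's API.

#include <stdbool.h>
#include <stdint.h>

#define SZ_2M 0x200000ULL

/*
 * Hypothetical helper mirroring the IS_ALIGNED() test added by this
 * patch: the surviving piece keeps its PTEs only when the split
 * boundary is aligned to the old VMA's largest page size, and userptr
 * VMAs always rebind because they get a new SG mapping.
 */
static bool can_skip_rebind(uint64_t boundary, uint64_t max_pte_size,
			    bool is_userptr)
{
	return !is_userptr && (boundary % max_pte_size) == 0;
}

int main(void)
{
	/* A 2M-page VMA split at 4M keeps its pages... */
	bool a = can_skip_rebind(0x400000, SZ_2M, false);	/* true */
	/* ...but a split at 3M lands mid-2M-page and must rebind. */
	bool b = can_skip_rebind(0x300000, SZ_2M, false);	/* false */
	return (a && !b) ? 0 : 1;
}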
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -668,6 +668,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
 		if (!is_null)
 			xe_res_next(curs, next - addr);
 		xe_walk->va_curs_start = next;
+		xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
 		*action = ACTION_CONTINUE;
 
 		return ret;
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2396,6 +2396,16 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
 	return vma;
 }
 
+static u64 xe_vma_max_pte_size(struct xe_vma *vma)
+{
+	if (vma->gpuva.flags & XE_VMA_PTE_1G)
+		return SZ_1G;
+	else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+		return SZ_2M;
+
+	return SZ_4K;
+}
+
 /*
  * Parse operations list and create any resources needed for the operations
  * prior to fully committing to the operations. This setup can fail.
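xe_vma_max_pte_size() is deliberately greedy: the XE_VMA_PTE_* flags accumulate while the page tables are walked, and the largest size seen anywhere is assumed to apply to every page being removed. A self-contained sketch of the consequence, with the flag bits and sizes stubbed out (assumed values, not the xe definitions):

#include <assert.h>
#include <stdint.h>

/* Assumed stand-ins for the xe flag bits and sizes. */
#define PTE_4K	(1u << 5)
#define PTE_2M	(1u << 6)
#define PTE_1G	(1u << 7)
#define SZ_4K	0x1000ULL
#define SZ_2M	0x200000ULL
#define SZ_1G	0x40000000ULL

static uint64_t max_pte_size(unsigned int flags)
{
	/* Greedy: the largest size seen anywhere in the VMA wins. */
	if (flags & PTE_1G)
		return SZ_1G;
	if (flags & PTE_2M)
		return SZ_2M;
	return SZ_4K;
}

int main(void)
{
	/* A VMA mapped with a mix of 4K and 2M pages reports 2M, so a
	 * 4K-aligned split that is not 2M-aligned is treated as unsafe
	 * and rebound, even if it only cuts through 4K pages.
	 * Conservative, but never wrong. */
	assert(max_pte_size(PTE_4K | PTE_2M) == SZ_2M);
	assert(max_pte_size(PTE_4K) == SZ_4K);
	return 0;
}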
@@ -2472,6 +2482,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
 			break;
 		}
 		case DRM_GPUVA_OP_REMAP:
+		{
+			struct xe_vma *old =
+				gpuva_to_vma(op->base.remap.unmap->va);
+
+			op->remap.start = xe_vma_start(old);
+			op->remap.range = xe_vma_size(old);
+
 			if (op->base.remap.prev) {
 				struct xe_vma *vma;
 				bool read_only =
@@ -2490,6 +2507,20 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
 				}
 
 				op->remap.prev = vma;
+
+				/*
+				 * Userptr creates a new SG mapping so
+				 * we must also rebind.
+				 */
+				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+					IS_ALIGNED(xe_vma_end(vma),
+						   xe_vma_max_pte_size(old));
+				if (op->remap.skip_prev) {
+					op->remap.range -=
+						xe_vma_end(vma) -
+						xe_vma_start(old);
+					op->remap.start = xe_vma_end(vma);
+				}
 			}
 
 			if (op->base.remap.next) {
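To make the adjustment above concrete, here is the skip_prev arithmetic run on assumed example addresses (a standalone sketch, not driver code): the old VMA covers [0, 8M) with 2M as its largest page size, the surviving prev VMA keeps [0, 4M), so the unbind shrinks to [4M, 8M).

#include <assert.h>
#include <stdint.h>

#define SZ_2M 0x200000ULL

int main(void)
{
	/* Assumed example: old VMA [0, 8M), prev VMA keeps [0, 4M). */
	uint64_t old_start = 0x0, old_end = 4 * SZ_2M;
	uint64_t prev_end = 2 * SZ_2M;

	uint64_t start = old_start;		/* op->remap.start */
	uint64_t range = old_end - old_start;	/* op->remap.range */

	/* 4M is 2M-aligned, so prev's PTEs stay valid: skip its rebind
	 * and unbind only the part of the old VMA prev doesn't keep. */
	int skip_prev = (prev_end % SZ_2M) == 0;
	if (skip_prev) {
		range -= prev_end - old_start;
		start = prev_end;
	}

	assert(start == 2 * SZ_2M);	/* unbind now starts at 4M */
	assert(range == 2 * SZ_2M);	/* and covers 4M, not 8M */
	return 0;
}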
@@ -2511,14 +2542,21 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
 				}
 
 				op->remap.next = vma;
-			}
 
-			/* XXX: Support no doing remaps */
-			op->remap.start =
-				xe_vma_start(gpuva_to_vma(op->base.remap.unmap->va));
-			op->remap.range =
-				xe_vma_size(gpuva_to_vma(op->base.remap.unmap->va));
+				/*
+				 * Userptr creates a new SG mapping so
+				 * we must also rebind.
+				 */
+				op->remap.skip_next = !xe_vma_is_userptr(old) &&
+					IS_ALIGNED(xe_vma_start(vma),
+						   xe_vma_max_pte_size(old));
+				if (op->remap.skip_next)
+					op->remap.range -=
+						xe_vma_end(old) -
+						xe_vma_start(vma);
+			}
 			break;
 		}
 		case DRM_GPUVA_OP_UNMAP:
 		case DRM_GPUVA_OP_PREFETCH:
 			/* Nothing to do */
@@ -2561,10 +2599,23 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
 	case DRM_GPUVA_OP_REMAP:
 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
 				 true);
-		if (op->remap.prev)
+
+		if (op->remap.prev) {
 			err |= xe_vm_insert_vma(vm, op->remap.prev);
-		if (op->remap.next)
+			if (!err && op->remap.skip_prev)
+				op->remap.prev = NULL;
+		}
+		if (op->remap.next) {
 			err |= xe_vm_insert_vma(vm, op->remap.next);
+			if (!err && op->remap.skip_next)
+				op->remap.next = NULL;
+		}
+
+		/* Adjust for partial unbind after removing VMA from VM */
+		if (!err) {
+			op->base.remap.unmap->va->va.addr = op->remap.start;
+			op->base.remap.unmap->va->va.range = op->remap.range;
+		}
 		break;
 	case DRM_GPUVA_OP_UNMAP:
 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
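The commit step above is where the skip actually takes effect: once prev/next are inserted into the VM, a skipped side is cleared so the execution step never issues a rebind for it, and the unmapped GPUVA is shrunk to the partial range computed at parse time. A hedged sketch of that control flow with stand-in types (not the xe structures):

#include <stdint.h>

/* Stand-in types for illustration only. */
struct va { uint64_t addr, range; };

struct remap_op {
	struct va *prev, *next, *unmap;
	uint64_t start, range;
	int skip_prev, skip_next;
};

/* After a successful insert, forget the skipped sides so no rebind is
 * issued for them, then narrow the unmap target to the partial range. */
static void commit_remap(struct remap_op *op)
{
	if (op->skip_prev)
		op->prev = NULL;
	if (op->skip_next)
		op->next = NULL;

	op->unmap->addr = op->start;
	op->unmap->range = op->range;
}

int main(void)
{
	struct va unmap = { .addr = 0x0, .range = 0x800000 };
	struct va prev;
	struct remap_op op = {
		.prev = &prev, .unmap = &unmap,
		.start = 0x400000, .range = 0x400000, .skip_prev = 1,
	};

	commit_remap(&op);	/* prev dropped, unmap now [4M, 8M) */
	return !(op.prev == NULL && unmap.addr == 0x400000);
}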
@@ -2634,9 +2685,10 @@ again:
 		bool next = !!op->remap.next;
 
 		if (!op->remap.unmap_done) {
-			vm->async_ops.munmap_rebind_inflight = true;
-			if (prev || next)
+			if (prev || next) {
+				vm->async_ops.munmap_rebind_inflight = true;
 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
+			}
 			err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
 					   op->num_syncs,
 					   !prev && !next ? op->fence : NULL,
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -30,6 +30,9 @@ struct xe_vm;
 #define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
 #define XE_VMA_FIRST_REBIND	(DRM_GPUVA_USERBITS << 3)
 #define XE_VMA_LAST_REBIND	(DRM_GPUVA_USERBITS << 4)
+#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)
 
 struct xe_vma {
 	/** @gpuva: Base GPUVA object */
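A quick check ties these defines back to the xe_pt.c hunk above: because the three PTE flags occupy consecutive user bits, shifting XE_VMA_PTE_4K by the page-table walk level selects the matching size flag. DRM_GPUVA_USERBITS is assumed to be bit 0 here purely so the snippet is self-contained.

#include <assert.h>

#define DRM_GPUVA_USERBITS	(1u << 0)	/* assumed value */
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)

int main(void)
{
	/* Level 0 maps 4K entries, level 1 maps 2M, level 2 maps 1G. */
	assert((XE_VMA_PTE_4K << 0) == XE_VMA_PTE_4K);
	assert((XE_VMA_PTE_4K << 1) == XE_VMA_PTE_2M);
	assert((XE_VMA_PTE_4K << 2) == XE_VMA_PTE_1G);
	return 0;
}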
@@ -336,6 +339,10 @@ struct xe_vma_op_remap {
 	u64 start;
 	/** @range: range of the VMA unmap */
 	u64 range;
+	/** @skip_prev: skip prev rebind */
+	bool skip_prev;
+	/** @skip_next: skip next rebind */
+	bool skip_next;
 	/** @unmap_done: unmap operation is done */
 	bool unmap_done;
 };