drm/amdkfd: Improve amdgpu_vm_handle_moved
Let amdgpu_vm_handle_moved update all BO VA mappings of BOs reserved by the caller. This will be useful for handling extra BO VA mappings in KFD VMs that are managed through the render node API.

v2: rebase against drm_exec changes (Alex)

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Commit: 5a104cb97c (parent: 6740ec97bc)
@ -1116,6 +1116,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
|
||||
return r;
|
||||
}
|
||||
|
||||
/* FIXME: In theory this loop shouldn't be needed any more when
|
||||
* amdgpu_vm_handle_moved handles all moved BOs that are reserved
|
||||
* with p->ticket. But removing it caused test regressions, so I'm
|
||||
* leaving it here for now.
|
||||
*/
|
||||
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
|
||||
bo_va = e->bo_va;
|
||||
if (bo_va == NULL)
|
||||
@ -1130,7 +1135,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_vm_handle_moved(adev, vm);
|
||||
r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -409,7 +409,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
|
||||
if (!r)
|
||||
r = amdgpu_vm_clear_freed(adev, vm, NULL);
|
||||
if (!r)
|
||||
r = amdgpu_vm_handle_moved(adev, vm);
|
||||
r = amdgpu_vm_handle_moved(adev, vm, ticket);
|
||||
|
||||
if (r && r != -EBUSY)
|
||||
DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
|
||||
|
@ -1373,6 +1373,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @vm: requested vm
|
||||
* @ticket: optional reservation ticket used to reserve the VM
|
||||
*
|
||||
* Make sure all BOs which are moved are updated in the PTs.
|
||||
*
|
||||
@ -1382,11 +1383,12 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
||||
* PTs have to be reserved!
|
||||
*/
|
||||
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm)
|
||||
struct amdgpu_vm *vm,
|
||||
struct ww_acquire_ctx *ticket)
|
||||
{
|
||||
struct amdgpu_bo_va *bo_va;
|
||||
struct dma_resv *resv;
|
||||
bool clear;
|
||||
bool clear, unlock;
|
||||
int r;
|
||||
|
||||
spin_lock(&vm->status_lock);
|
||||
@ -1409,17 +1411,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
|
||||
spin_unlock(&vm->status_lock);
|
||||
|
||||
/* Try to reserve the BO to avoid clearing its ptes */
|
||||
if (!adev->debug_vm && dma_resv_trylock(resv))
|
||||
if (!adev->debug_vm && dma_resv_trylock(resv)) {
|
||||
clear = false;
|
||||
unlock = true;
|
||||
/* The caller is already holding the reservation lock */
|
||||
} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
|
||||
clear = false;
|
||||
unlock = false;
|
||||
/* Somebody else is using the BO right now */
|
||||
else
|
||||
} else {
|
||||
clear = true;
|
||||
unlock = false;
|
||||
}
|
||||
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, clear);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (!clear)
|
||||
if (unlock)
|
||||
dma_resv_unlock(resv);
|
||||
spin_lock(&vm->status_lock);
|
||||
}
|
||||
|
@ -443,7 +443,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct dma_fence **fence);
|
||||
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm);
|
||||
struct amdgpu_vm *vm,
|
||||
struct ww_acquire_ctx *ticket);
|
||||
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
|
||||
struct amdgpu_vm *vm, struct amdgpu_bo *bo);
|
||||
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
|
Loading…
x
Reference in New Issue
Block a user