drm/amdgpu: handle multi level PD updates V2
Update all levels of the page directory.

V2:
a. sub level PDEs were always written to the wrong place.
b. sub levels need to be updated regardless of parent updates.

Signed-off-by: Christian König <christian.koenig@amd.com> (V1)
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (V1)
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com> (V2)
Acked-by: Alex Deucher <alexander.deucher@amd.com> (V2)
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 194d216113
parent d711e1398d
committed by Alex Deucher
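Before the diff, a quick orientation. The patch turns the old single-shot page-directory update into a per-level walk: amdgpu_vm_update_directories() now just kicks off amdgpu_vm_update_level() at the root, and each level rewrites its own PDEs before descending into every populated sub-directory, so sub levels get updated regardless of whether the parent changed (V2 item b). Below is a minimal standalone sketch of that control flow; the pd_level struct and write_pdes() helper are illustrative stand-ins, not the driver's types.

#include <stdio.h>

/* Hypothetical stand-in for the driver's per-level type: amdgpu uses
 * struct amdgpu_vm_pt, with a buffer object backing each directory. */
struct pd_level {
        struct pd_level *entries;       /* sub-directories; NULL for a leaf */
        unsigned last_entry_used;       /* highest populated index */
        unsigned level;
};

/* Placeholder for the real work: building the IB that rewrites the
 * PDEs of @parent so they point at its children. */
static int write_pdes(struct pd_level *parent)
{
        printf("updating level %u (%u entries)\n",
               parent->level, parent->last_entry_used + 1);
        return 0;
}

/* The shape the patch introduces: update this directory, then recurse
 * into every populated child. Depth is bounded (at most 5 layers per
 * the patch's own comment), so plain recursion is safe. */
static int update_level(struct pd_level *parent)
{
        unsigned idx;
        int r;

        if (!parent->entries)   /* leaf page table: nothing below */
                return 0;

        r = write_pdes(parent);
        if (r)
                return r;

        for (idx = 0; idx <= parent->last_entry_used; ++idx) {
                r = update_level(&parent->entries[idx]);
                if (r)
                        return r;
        }
        return 0;
}

int main(void)
{
        struct pd_level leaves[2] = {
                { 0, 0, 1 }, { 0, 0, 1 }       /* two leaf page tables */
        };
        struct pd_level root = { leaves, 1, 0 };

        return update_level(&root);     /* root first, then each child */
}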
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -777,7 +777,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
         struct amdgpu_bo *bo;
         int i, r;
 
-        r = amdgpu_vm_update_page_directory(adev, vm);
+        r = amdgpu_vm_update_directories(adev, vm);
         if (r)
                 return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -536,7 +536,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
         if (r)
                 goto error;
 
-        r = amdgpu_vm_update_page_directory(adev, vm);
+        r = amdgpu_vm_update_directories(adev, vm);
         if (r)
                 goto error;
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -700,24 +700,24 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 }
 
 /*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
+ * amdgpu_vm_update_level - update a single level in the hierarchy
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
+ * @parent: parent directory
  *
- * Allocates new page tables if necessary
- * and updates the page directory.
+ * Makes sure all entries in @parent are up to date.
  * Returns 0 for success, error for failure.
  */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-                                    struct amdgpu_vm *vm)
+static int amdgpu_vm_update_level(struct amdgpu_device *adev,
+                                  struct amdgpu_vm *vm,
+                                  struct amdgpu_vm_pt *parent,
+                                  unsigned level)
 {
         struct amdgpu_bo *shadow;
         struct amdgpu_ring *ring;
         uint64_t pd_addr, shadow_addr;
-        uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
+        uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
         uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
         unsigned count = 0, pt_idx, ndw;
         struct amdgpu_job *job;
@@ -726,16 +726,19 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
         int r;
 
+        if (!parent->entries)
+                return 0;
+
         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-        shadow = vm->root.bo->shadow;
 
         /* padding, etc. */
         ndw = 64;
 
         /* assume the worst case */
-        ndw += vm->root.last_entry_used * 6;
+        ndw += parent->last_entry_used * 6;
 
-        pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
+        pd_addr = amdgpu_bo_gpu_offset(parent->bo);
 
+        shadow = parent->bo->shadow;
         if (shadow) {
                 r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
                 if (r)
@@ -754,9 +757,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
         params.adev = adev;
         params.ib = &job->ibs[0];
 
-        /* walk over the address space and update the page directory */
-        for (pt_idx = 0; pt_idx <= vm->root.last_entry_used; ++pt_idx) {
-                struct amdgpu_bo *bo = vm->root.entries[pt_idx].bo;
+        /* walk over the address space and update the directory */
+        for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+                struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
                 uint64_t pde, pt;
 
                 if (bo == NULL)
@@ -772,10 +775,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                 }
 
                 pt = amdgpu_bo_gpu_offset(bo);
-                if (vm->root.entries[pt_idx].addr == pt)
+                if (parent->entries[pt_idx].addr == pt)
                         continue;
 
-                vm->root.entries[pt_idx].addr = pt;
+                parent->entries[pt_idx].addr = pt;
 
                 pde = pd_addr + pt_idx * 8;
                 if (((last_pde + 8 * count) != pde) ||
@@ -820,11 +823,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
         if (params.ib->length_dw == 0) {
                 amdgpu_job_free(job);
-                return 0;
-        }
-
-        amdgpu_ring_pad_ib(ring, params.ib);
-        amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
-                         AMDGPU_FENCE_OWNER_VM);
-        if (shadow)
-                amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+        } else {
+                amdgpu_ring_pad_ib(ring, params.ib);
+                amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
+                                 AMDGPU_FENCE_OWNER_VM);
+                if (shadow)
+                        amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
@@ -836,10 +837,25 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                 if (r)
                         goto error_free;
 
-        amdgpu_bo_fence(vm->root.bo, fence, true);
-        dma_fence_put(vm->last_dir_update);
-        vm->last_dir_update = dma_fence_get(fence);
-        dma_fence_put(fence);
+                amdgpu_bo_fence(parent->bo, fence, true);
+                dma_fence_put(vm->last_dir_update);
+                vm->last_dir_update = dma_fence_get(fence);
+                dma_fence_put(fence);
+        }
+        /*
+         * Recurse into the subdirectories. This recursion is harmless because
+         * we only have a maximum of 5 layers.
+         */
+        for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+                struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+
+                if (!entry->bo)
+                        continue;
+
+                r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
+                if (r)
+                        return r;
+        }
 
         return 0;
 
@@ -848,6 +864,21 @@ error_free:
         return r;
 }
 
+/*
+ * amdgpu_vm_update_directories - make sure that all directories are valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Makes sure all directories are up to date.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+                                 struct amdgpu_vm *vm)
+{
+        return amdgpu_vm_update_level(adev, vm, &vm->root, 0);
+}
+
 /**
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -192,7 +192,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                       struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-                                    struct amdgpu_vm *vm);
+int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+                                 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                           struct amdgpu_vm *vm,
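One detail worth calling out: the PDE stride incr changes from the fixed AMDGPU_VM_PTE_COUNT * 8 to amdgpu_vm_bo_size(adev, level + 1). The update loop coalesces writes for PDEs whose targets form a contiguous run, so the stride must equal the size of one child BO at the next level; with multiple levels that size is no longer a constant, which is the root of V2 item (a). Here is a toy model of the same coalescing test the loop uses (last_pde + 8 * count != pde, last_pt + incr * count != pt); ENTRY_SIZE and child_bo_size are made-up illustrative values, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 8u   /* bytes per PDE, as in the patch's pt_idx * 8 */

int main(void)
{
        /* Pretend a directory at pd_addr points at children laid out
         * back to back, each child_bo_size bytes long (illustrative
         * stand-in for amdgpu_vm_bo_size(adev, level + 1)). */
        uint64_t pd_addr = 0x100000, child_base = 0x200000;
        uint64_t child_bo_size = 4096;
        uint64_t last_pde = ~0ULL, last_pt = ~0ULL;
        unsigned count = 0;

        for (unsigned pt_idx = 0; pt_idx < 8; ++pt_idx) {
                uint64_t pde = pd_addr + pt_idx * ENTRY_SIZE;
                uint64_t pt = child_base + pt_idx * child_bo_size;

                /* Same shape as the update loop's check: extend the run
                 * only if both the PDE slot and the child address continue
                 * the current arithmetic progression. */
                if ((last_pde + ENTRY_SIZE * count) != pde ||
                    (last_pt + child_bo_size * count) != pt) {
                        if (count)
                                printf("flush %u PDEs from %#llx\n", count,
                                       (unsigned long long)last_pde);
                        count = 1;
                        last_pde = pde;
                        last_pt = pt;
                } else {
                        ++count;
                }
        }
        if (count)
                printf("flush %u PDEs from %#llx\n", count,
                       (unsigned long long)last_pde);
        return 0;
}

With a constant stride of AMDGPU_VM_PTE_COUNT * 8 this math only holds when every child is a 4 KiB leaf page table; deriving the stride per level keeps the coalesced PDE writes landing in the right place for intermediate directories as well.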