drm/radeon: make page table updates async v2

For now, the page table update is done with the CP.

v2: Rebased on Jerome's bugfix. Make the validity comparison
    more human readable.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Author: Christian König, 2012-08-11 15:00:30 +02:00
Committed by: Alex Deucher
parent 3e8970f96b
commit 2a6f1abbb4
5 changed files with 71 additions and 25 deletions
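Context for the diff below: cayman_vm_set_page() used to fill the page table directly from the CPU with writeq(); with this patch the entries are written by the CP instead, as a single PACKET3_ME_WRITE packet emitted on the ring selected by the new vm.pt_ring_index field, while radeon_vm_bo_update_pte() locks that ring, syncs against the VM's previous fence and emits a new one. The following is a minimal standalone sketch of the dword stream the new set_page path produces; sketch_me_write() is a hypothetical helper and the PACKET3() header encoding is reproduced here only for illustration, so treat the details as assumptions rather than part of the patch.

	#include <stdint.h>

	/* Illustration only: the usual radeon type-3 packet header layout
	 * (opcode in bits 15:8, payload dword count minus one in bits 29:16). */
	#define PACKET3(op, n)	((3u << 30) | (((op) & 0xFF) << 8) | (((n) & 0x3FFF) << 16))
	#define PACKET3_ME_WRITE	0x7A

	/* Build the same dwords cayman_vm_set_page() writes to the ring, but
	 * into a plain buffer: header, page table destination address, then
	 * one 64-bit page table entry (two dwords) per page.
	 * Returns the number of dwords written. */
	static unsigned sketch_me_write(uint32_t *out, uint64_t pt,
					const uint64_t *entries, unsigned npages)
	{
		unsigned i, n = 0;

		out[n++] = PACKET3(PACKET3_ME_WRITE, 1 + npages * 2);
		out[n++] = (uint32_t)pt;			/* pt & 0xffffffff */
		out[n++] = (uint32_t)(pt >> 32) & 0xff;		/* upper bits of destination */
		for (i = 0; i < npages; ++i) {
			out[n++] = (uint32_t)entries[i];
			out[n++] = (uint32_t)(entries[i] >> 32);
		}
		return n;
	}

The 0xff mask on the upper half of the destination mirrors the diff, which suggests the page table address handed to the CP is limited to 40 bits.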

drivers/gpu/drm/radeon/ni.c

@@ -1521,20 +1521,24 @@ void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
 			unsigned pfn, struct ttm_mem_reg *mem,
 			unsigned npages, uint32_t flags)
 {
-	void __iomem *ptr = (void *)vm->pt;
-	uint64_t addr;
+	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+	uint64_t addr, pt = vm->pt_gpu_addr + pfn * 8;
 	int i;
 
 	addr = flags = cayman_vm_page_flags(rdev, flags);
 
-	for (i = 0; i < npages; ++i, ++pfn) {
-		if (mem) {
-			addr = radeon_vm_get_addr(rdev, mem, i);
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + npages * 2));
+	radeon_ring_write(ring, pt & 0xffffffff);
+	radeon_ring_write(ring, (pt >> 32) & 0xff);
+	for (i = 0; i < npages; ++i) {
+		if (mem) {
+			addr = radeon_vm_get_addr(rdev, mem, i);
 			addr = addr & 0xFFFFFFFFFFFFF000ULL;
 			addr |= flags;
 		}
-		writeq(addr, ptr + (pfn * 8));
+		radeon_ring_write(ring, addr & 0xffffffff);
+		radeon_ring_write(ring, (addr >> 32) & 0xffffffff);
 	}
 }
 
 void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)

drivers/gpu/drm/radeon/nid.h

@@ -585,6 +585,7 @@
 #define	PACKET3_SET_CONTEXT_REG_INDIRECT	0x73
 #define	PACKET3_SET_RESOURCE_INDIRECT		0x74
 #define	PACKET3_SET_APPEND_CNT			0x75
+#define	PACKET3_ME_WRITE			0x7A
 
 #endif

drivers/gpu/drm/radeon/radeon.h

@@ -1135,6 +1135,8 @@ struct radeon_asic {
 	struct {
 		int (*init)(struct radeon_device *rdev);
 		void (*fini)(struct radeon_device *rdev);
+
+		u32 pt_ring_index;
 		void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
 				 unsigned pfn, struct ttm_mem_reg *mem,
 				 unsigned npages, uint32_t flags);

drivers/gpu/drm/radeon/radeon_asic.c

@@ -1375,6 +1375,7 @@ static struct radeon_asic cayman_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {
@@ -1478,6 +1479,7 @@ static struct radeon_asic trinity_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {
@@ -1581,6 +1583,7 @@ static struct radeon_asic si_asic = {
 	.vm = {
 		.init = &si_vm_init,
 		.fini = &si_vm_fini,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {

drivers/gpu/drm/radeon/radeon_gart.c

@@ -464,15 +464,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 			continue;
 
 		list_for_each_entry(bo_va, &vm->va, vm_list) {
-			struct ttm_mem_reg *mem = NULL;
-			if (bo_va->valid)
-				mem = &bo_va->bo->tbo.mem;
-
 			bo_va->valid = false;
-			r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
-			if (r) {
-				DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
-			}
 		}
 	}
 	return 0;
@@ -801,7 +793,6 @@ u64 radeon_vm_get_addr(struct radeon_device *rdev,
 	return addr;
 }
 
-/* object have to be reserved & global and local mutex must be locked */
 /**
  * radeon_vm_bo_update_pte - map a bo into the vm page table
  *
@@ -812,15 +803,21 @@ u64 radeon_vm_get_addr(struct radeon_device *rdev,
  *
  * Fill in the page table entries for @bo (cayman+).
  * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object have to be reserved & global and local mutex must be locked!
  */
 int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 			    struct radeon_vm *vm,
 			    struct radeon_bo *bo,
 			    struct ttm_mem_reg *mem)
 {
+	unsigned ridx = rdev->asic->vm.pt_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	struct radeon_semaphore *sem = NULL;
 	struct radeon_bo_va *bo_va;
-	unsigned ngpu_pages;
+	unsigned ngpu_pages, ndw;
 	uint64_t pfn;
+	int r;
 
 	/* nothing to do if vm isn't bound */
 	if (vm->sa_bo == NULL)
@@ -832,7 +829,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		return -EINVAL;
 	}
 
-	if (bo_va->valid && mem)
+	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
 		return 0;
 
 	ngpu_pages = radeon_bo_ngpu_pages(bo);
@@ -846,12 +843,50 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		if (mem->mem_type == TTM_PL_TT) {
 			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
 		}
-	}
-	if (!bo_va->valid) {
-		mem = NULL;
+		if (!bo_va->valid) {
+			mem = NULL;
+		}
+	} else {
+		bo_va->valid = false;
 	}
 
 	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
-	radeon_asic_vm_set_page(rdev, bo_va->vm, pfn, mem, ngpu_pages, bo_va->flags);
+
+	if (vm->fence && radeon_fence_signaled(vm->fence)) {
+		radeon_fence_unref(&vm->fence);
+	}
+
+	if (vm->fence && vm->fence->ring != ridx) {
+		r = radeon_semaphore_create(rdev, &sem);
+		if (r) {
+			return r;
+		}
+	}
+
+	/* estimate number of dw needed */
+	ndw = 32;
+	ndw += (ngpu_pages >> 12) * 3;
+	ndw += ngpu_pages * 2;
+
+	r = radeon_ring_lock(rdev, ring, ndw);
+	if (r) {
+		return r;
+	}
+
+	if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
+		radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
+		radeon_fence_note_sync(vm->fence, ridx);
+	}
+
+	radeon_asic_vm_set_page(rdev, vm, pfn, mem, ngpu_pages, bo_va->flags);
+
+	radeon_fence_unref(&vm->fence);
+	r = radeon_fence_emit(rdev, &vm->fence, ridx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, vm->fence);
 	radeon_fence_unref(&vm->last_flush);
+
 	return 0;
 }
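As a worked example of the ring-space estimate in the hunk above (the numbers are illustrative, not from the patch): for a hypothetical 1 MiB buffer, ngpu_pages = 1 MiB / 4 KiB = 256, so ndw = 32 + (256 >> 12) * 3 + 256 * 2 = 544 dwords are reserved on the ring — a fixed 32 dwords presumably for the surrounding sync, flush and fence packets, three extra dwords of packet header for every 4096 pages, and two dwords per page table entry.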
@@ -875,6 +910,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_bo *bo)
 {
 	struct radeon_bo_va *bo_va;
+	int r;
 
 	bo_va = radeon_bo_va(bo, vm);
 	if (bo_va == NULL)
@@ -882,14 +918,14 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		return 0;
 
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
-	radeon_vm_free_pt(rdev, vm);
+	r = radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
 	mutex_unlock(&rdev->vm_manager.lock);
 	list_del(&bo_va->vm_list);
 	mutex_unlock(&vm->mutex);
 	list_del(&bo_va->bo_list);
 
 	kfree(bo_va);
-	return 0;
+	return r;
 }
 
 /**