drm/amdgpu: dispatch job for vm
Use the kernel context to submit commands for the VM.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
commit d5fc5e82a3
parent 23ca0e4e47
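The three functions touched in amdgpu_vm.c below (amdgpu_vm_clear_bo(), amdgpu_vm_update_page_directory() and amdgpu_vm_bo_update_mapping()) all switch to the same submission pattern when amdgpu_enable_scheduler is set: the IB is heap-allocated, wrapped in a kernel-context amdgpu_cs_parser job, pushed to the ring's scheduler entity and waited on until it has been emitted. A condensed view of that pattern, lifted from the hunks below (not compilable on its own; error handling and the per-callsite job_param setup are omitted):

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	/* ... fill ib with the PDE/PTE updates ... */

	sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
					    adev->kernel_ctx, ib, 1);
	sched_job->run_job = amdgpu_vm_run_job;   /* fences the BO once the IB is emitted */
	sched_job->free_job = amdgpu_vm_free_job; /* frees the IBs when the job retires */
	v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
	sched_job->uf.sequence = v_seq;
	amd_sched_push_job(ring->scheduler,
			   &adev->kernel_ctx->rings[ring->idx].c_entity, sched_job);
	r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
				v_seq, true, -1);
	if (r)
		DRM_ERROR("emit timeout\n");

When the scheduler is disabled, the old direct path is kept: amdgpu_ib_schedule() followed by amdgpu_bo_fence()/amdgpu_vm_fence_pts(), with the IB freed and kfree'd at the call site instead of in the free_job callback.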
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1221,6 +1221,19 @@ struct amdgpu_cs_chunk {
 	void __user *user_ptr;
 };
 
+union amdgpu_sched_job_param {
+	struct {
+		struct amdgpu_vm *vm;
+		uint64_t start;
+		uint64_t last;
+		struct amdgpu_fence **fence;
+
+	} vm_mapping;
+	struct {
+		struct amdgpu_bo *bo;
+	} vm;
+};
+
 struct amdgpu_cs_parser {
 	struct amdgpu_device *adev;
 	struct drm_file *filp;
@@ -1245,6 +1258,7 @@ struct amdgpu_cs_parser {
 	struct mutex job_lock;
 	struct work_struct job_work;
 	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
+	union amdgpu_sched_job_param job_param;
 	int (*run_job)(struct amdgpu_cs_parser *sched_job);
 	int (*free_job)(struct amdgpu_cs_parser *sched_job);
 };
@@ -2255,6 +2269,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs);
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
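For reference, the job_param union declared above carries the per-job data that the run_job callbacks in amdgpu_vm.c read back after the scheduler has emitted the IB: amdgpu_vm_run_job() uses job_param.vm.bo to fence the page directory/table BO, while amdgpu_vm_bo_update_mapping_run_job() uses the job_param.vm_mapping fields to fence the affected page tables and hand the fence back to the caller. The wiring looks like this (taken from the mapping path in the hunks below):

	sched_job->job_param.vm_mapping.vm = vm;
	sched_job->job_param.vm_mapping.start = mapping->it.start;
	sched_job->job_param.vm_mapping.last = mapping->it.last;
	sched_job->job_param.vm_mapping.fence = fence;
	sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
	sched_job->free_job = amdgpu_vm_free_job;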
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -306,6 +306,24 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 	}
 }
 
+static int amdgpu_vm_free_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	int i;
+	for (i = 0; i < sched_job->num_ibs; i++)
+		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
+static int amdgpu_vm_run_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	amdgpu_bo_fence(sched_job->job_param.vm.bo,
+			sched_job->ibs[sched_job->num_ibs -1].fence, true);
+	return 0;
+}
+
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -316,7 +334,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
-	struct amdgpu_ib ib;
+	struct amdgpu_cs_parser *sched_job = NULL;
+	struct amdgpu_ib *ib;
 	unsigned entries;
 	uint64_t addr;
 	int r;
@@ -336,24 +355,54 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
-	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
 		goto error_unreserve;
 
-	ib.length_dw = 0;
-
-	amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > 64);
-
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
 	if (r)
 		goto error_free;
 
-	amdgpu_bo_fence(bo, ib.fence, true);
+	ib->length_dw = 0;
+
+	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > 64);
+
+	if (amdgpu_enable_scheduler) {
+		int r;
+		uint64_t v_seq;
+		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
+						    adev->kernel_ctx, ib, 1);
+		if(!sched_job)
+			goto error_free;
+		sched_job->job_param.vm.bo = bo;
+		sched_job->run_job = amdgpu_vm_run_job;
+		sched_job->free_job = amdgpu_vm_free_job;
+		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+		sched_job->uf.sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
+				   &adev->kernel_ctx->rings[ring->idx].c_entity,
+				   sched_job);
+		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
+					v_seq,
+					true,
+					-1);
+		if (r)
+			DRM_ERROR("emit timeout\n");
+
+		amdgpu_bo_unreserve(bo);
+		return 0;
+	} else {
+		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
+		if (r)
+			goto error_free;
+		amdgpu_bo_fence(bo, ib->fence, true);
+	}
 
 error_free:
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 
 error_unreserve:
 	amdgpu_bo_unreserve(bo);
@@ -406,7 +455,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct amdgpu_cs_parser *sched_job = NULL;
+
 	int r;
 
 	/* padding, etc. */
@@ -419,10 +470,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
 	if (r)
 		return r;
-	ib.length_dw = 0;
+	ib->length_dw = 0;
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -442,7 +497,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, &ib, last_pde,
+				amdgpu_vm_update_pages(adev, ib, last_pde,
 						       last_pt, count, incr,
 						       AMDGPU_PTE_VALID, 0);
 			}
@@ -456,23 +511,59 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
+		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
 				       incr, AMDGPU_PTE_VALID, 0);
 
-	if (ib.length_dw != 0) {
-		amdgpu_vm_pad_ib(adev, &ib);
-		amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(ib.length_dw > ndw);
-		r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-		if (r) {
-			amdgpu_ib_free(adev, &ib);
-			return r;
+	if (ib->length_dw != 0) {
+		amdgpu_vm_pad_ib(adev, ib);
+		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+		WARN_ON(ib->length_dw > ndw);
+		if (amdgpu_enable_scheduler) {
+			int r;
+			uint64_t v_seq;
+			sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
+							    adev->kernel_ctx,
+							    ib, 1);
+			if(!sched_job)
+				goto error_free;
+			sched_job->job_param.vm.bo = pd;
+			sched_job->run_job = amdgpu_vm_run_job;
+			sched_job->free_job = amdgpu_vm_free_job;
+			v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+			sched_job->uf.sequence = v_seq;
+			amd_sched_push_job(ring->scheduler,
+					   &adev->kernel_ctx->rings[ring->idx].c_entity,
+					   sched_job);
+			r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
+						v_seq,
+						true,
+						-1);
+			if (r)
+				DRM_ERROR("emit timeout\n");
+		} else {
+			r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
+			if (r) {
+				amdgpu_ib_free(adev, ib);
+				return r;
+			}
+			amdgpu_bo_fence(pd, ib->fence, true);
 		}
-		amdgpu_bo_fence(pd, ib.fence, true);
 	}
-	amdgpu_ib_free(adev, &ib);
+
+	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 
 	return 0;
+
+error_free:
+	if (sched_job)
+		kfree(sched_job);
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return -ENOMEM;
 }
 
 /**
@@ -657,6 +748,20 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
+static int amdgpu_vm_bo_update_mapping_run_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
+	amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
+			    sched_job->job_param.vm_mapping.start,
+			    sched_job->job_param.vm_mapping.last + 1,
+			    sched_job->ibs[sched_job->num_ibs -1].fence);
+	if (fence) {
+		amdgpu_fence_unref(fence);
+		*fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
+	}
+	return 0;
+}
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
@@ -681,7 +786,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	unsigned nptes, ncmds, ndw;
 	uint32_t flags = gtt_flags;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct amdgpu_cs_parser *sched_job = NULL;
 	int r;
 
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -728,48 +834,91 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
+	if (r) {
+		kfree(ib);
 		return r;
-	ib.length_dw = 0;
+	}
+
+	ib->length_dw = 0;
 
 	if (!(flags & AMDGPU_PTE_VALID)) {
 		unsigned i;
 
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_fence *f = vm->ids[i].last_id_use;
-			r = amdgpu_sync_fence(adev, &ib.sync, &f->base);
+			r = amdgpu_sync_fence(adev, &ib->sync, &f->base);
 			if (r)
 				return r;
 		}
 	}
 
-	r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
+	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
 				  mapping->it.last + 1, addr + mapping->offset,
 				  flags, gtt_flags);
 
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
 		return r;
 	}
 
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > ndw);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > ndw);
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		return r;
-	}
-	amdgpu_vm_fence_pts(vm, mapping->it.start,
-			    mapping->it.last + 1, ib.fence);
-	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(ib.fence);
-	}
-	amdgpu_ib_free(adev, &ib);
+	if (amdgpu_enable_scheduler) {
+		int r;
+		uint64_t v_seq;
+		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
+						    adev->kernel_ctx, ib, 1);
+		if(!sched_job)
+			goto error_free;
+		sched_job->job_param.vm_mapping.vm = vm;
+		sched_job->job_param.vm_mapping.start = mapping->it.start;
+		sched_job->job_param.vm_mapping.last = mapping->it.last;
+		sched_job->job_param.vm_mapping.fence = fence;
+		sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
+		sched_job->free_job = amdgpu_vm_free_job;
+		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+		sched_job->uf.sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
+				   &adev->kernel_ctx->rings[ring->idx].c_entity,
+				   sched_job);
+		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
+					v_seq,
+					true,
+					-1);
+		if (r)
+			DRM_ERROR("emit timeout\n");
+	} else {
+		r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
+		if (r) {
+			amdgpu_ib_free(adev, ib);
+			return r;
+		}
+
+		amdgpu_vm_fence_pts(vm, mapping->it.start,
+				    mapping->it.last + 1, ib->fence);
+		if (fence) {
+			amdgpu_fence_unref(fence);
+			*fence = amdgpu_fence_ref(ib->fence);
+		}
+
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 	return 0;
+
+error_free:
+	if (sched_job)
+		kfree(sched_job);
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return -ENOMEM;
 }
 
 /**