drm/amdgpu: drop amdgpu_sync from amdgpu_vmid_grab v2
Instead return the fence directly. This avoids a memory allocation to
store the fence in a sync object.

v2: clean up coding style as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-6-christian.koenig@amd.com
commit 940ca22b7e
parent c5093cddf5
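The gist of the change, as a minimal before/after sketch of the caller-side
pattern (a simplified illustration based on the hunks below, not code taken
from the patch itself; error handling elided):

    /* Before: the wait fence was funneled through an amdgpu_sync
     * container, which allocates an entry for each fence added. */
    struct amdgpu_sync sync;
    struct dma_fence *fence;
    int r;

    amdgpu_sync_create(&sync);
    r = amdgpu_vmid_grab(vm, ring, &sync, job);
    fence = amdgpu_sync_get_fence(&sync);

    /* After: the fence comes back directly through an out parameter,
     * so no container (and no allocation) is needed. */
    struct dma_fence *fence = NULL;
    int r;

    r = amdgpu_vmid_grab(vm, ring, job, &fence);
    /* fence stays NULL when a VMID was assigned; otherwise it is a
     * referenced fence to wait for before retrying. */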
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -170,26 +170,27 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @idle: resulting idle VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to find an idle VMID, if none is idle add a fence to wait to the sync
  * object. Returns -ENOMEM when we are out of memory.
  */
 static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
                                  struct amdgpu_ring *ring,
-                                 struct amdgpu_sync *sync,
-                                 struct amdgpu_vmid **idle)
+                                 struct amdgpu_vmid **idle,
+                                 struct dma_fence **fence)
 {
     struct amdgpu_device *adev = ring->adev;
     unsigned vmhub = ring->funcs->vmhub;
     struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
     struct dma_fence **fences;
     unsigned i;
-    int r;
 
-    if (!dma_fence_is_signaled(ring->vmid_wait))
-        return amdgpu_sync_fence(sync, ring->vmid_wait);
+    if (!dma_fence_is_signaled(ring->vmid_wait)) {
+        *fence = dma_fence_get(ring->vmid_wait);
+        return 0;
+    }
 
     fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
     if (!fences)
@@ -228,10 +229,10 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
             return -ENOMEM;
         }
 
-        r = amdgpu_sync_fence(sync, &array->base);
+        *fence = dma_fence_get(&array->base);
         dma_fence_put(ring->vmid_wait);
         ring->vmid_wait = &array->base;
-        return r;
+        return 0;
     }
     kfree(fences);
 
@@ -243,17 +244,17 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @job: job who wants to use the VMID
  * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to assign a reserved VMID.
  */
 static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
                                      struct amdgpu_ring *ring,
-                                     struct amdgpu_sync *sync,
                                      struct amdgpu_job *job,
-                                     struct amdgpu_vmid **id)
+                                     struct amdgpu_vmid **id,
+                                     struct dma_fence **fence)
 {
     struct amdgpu_device *adev = ring->adev;
     unsigned vmhub = ring->funcs->vmhub;
@@ -280,7 +281,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
         tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
         if (tmp) {
             *id = NULL;
-            return amdgpu_sync_fence(sync, tmp);
+            *fence = dma_fence_get(tmp);
+            return 0;
         }
         needs_flush = true;
     }
@@ -302,17 +304,17 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @job: job who wants to use the VMID
  * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to reuse a VMID for this submission.
  */
 static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                                  struct amdgpu_ring *ring,
-                                 struct amdgpu_sync *sync,
                                  struct amdgpu_job *job,
-                                 struct amdgpu_vmid **id)
+                                 struct amdgpu_vmid **id,
+                                 struct dma_fence **fence)
 {
     struct amdgpu_device *adev = ring->adev;
     unsigned vmhub = ring->funcs->vmhub;
@@ -367,13 +369,13 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @job: job who wants to use the VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                     struct amdgpu_sync *sync, struct amdgpu_job *job)
+                     struct amdgpu_job *job, struct dma_fence **fence)
 {
     struct amdgpu_device *adev = ring->adev;
     unsigned vmhub = ring->funcs->vmhub;
@@ -383,16 +385,16 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
     int r = 0;
 
     mutex_lock(&id_mgr->lock);
-    r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+    r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
     if (r || !idle)
         goto error;
 
     if (vm->reserved_vmid[vmhub]) {
-        r = amdgpu_vmid_grab_reserved(vm, ring, sync, job, &id);
+        r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
         if (r || !id)
             goto error;
     } else {
-        r = amdgpu_vmid_grab_used(vm, ring, sync, job, &id);
+        r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
         if (r)
             goto error;
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -84,7 +84,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                unsigned vmhub);
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                     struct amdgpu_sync *sync, struct amdgpu_job *job);
+                     struct amdgpu_job *job, struct dma_fence **fence);
 void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
                        unsigned vmid);
 void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -239,12 +239,12 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
     return 0;
 }
 
-static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
-                                               struct drm_sched_entity *s_entity)
+static struct dma_fence *
+amdgpu_job_dependency(struct drm_sched_job *sched_job,
+                      struct drm_sched_entity *s_entity)
 {
     struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
     struct amdgpu_job *job = to_amdgpu_job(sched_job);
-    struct amdgpu_vm *vm = job->vm;
     struct dma_fence *fence;
     int r;
 
@@ -255,12 +255,10 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
         DRM_ERROR("Error adding fence (%d)\n", r);
     }
 
-    while (fence == NULL && vm && !job->vmid) {
-        r = amdgpu_vmid_grab(vm, ring, &job->sync, job);
+    while (!fence && job->vm && !job->vmid) {
+        r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
         if (r)
             DRM_ERROR("Error getting VM ID (%d)\n", r);
-
-        fence = amdgpu_sync_get_fence(&job->sync);
     }
 
     if (!fence && job->gang_submit)
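Since every out-parameter assignment in the patch takes its own reference via
dma_fence_get(), the caller owns the returned fence. A minimal sketch of the
expected handling in a hypothetical blocking caller (not part of this patch;
the real consumer is amdgpu_job_dependency() above, which simply returns the
fence to the scheduler):

    struct dma_fence *fence = NULL;
    int r = amdgpu_vmid_grab(vm, ring, job, &fence);

    if (r)
        return r;
    if (fence) {
        /* No VMID yet: wait for the blocking fence to signal, then
         * drop our reference before retrying the grab. */
        dma_fence_wait(fence, false);
        dma_fence_put(fence);
    }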