drm/amdgpu: fix last_vm_update fence not being effective for sched fences
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
commit 3c62338c26
parent f38fdfddfa
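With the GPU scheduler enabled, VM page-table updates can complete through an amd_sched_fence rather than an amdgpu_fence, so tracking last_vm_update and flushed_updates as struct amdgpu_fence * silently dropped scheduler fences. The patch below widens those fields to the generic struct fence *, adds helpers that recognize the device and owner for both fence types, and moves the reference bookkeeping from amdgpu_fence_ref()/amdgpu_fence_unref() to the generic fence_get()/fence_put() API.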
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -705,7 +705,7 @@ struct amdgpu_sync {
 	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
 	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
 	DECLARE_HASHTABLE(fences, 4);
-	struct amdgpu_fence	*last_vm_update;
+	struct fence		*last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
@@ -963,7 +963,7 @@ struct amdgpu_vm_id {
 	unsigned		id;
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct amdgpu_fence	*flushed_updates;
+	struct fence		*flushed_updates;
 	/* last use of vmid */
 	struct amdgpu_fence	*last_id_use;
 };
@@ -2349,7 +2349,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates);
+		     struct fence *updates);
 void amdgpu_vm_fence(struct amdgpu_device *adev,
 		     struct amdgpu_vm *vm,
 		     struct amdgpu_fence *fence);
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -119,5 +119,6 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			return r;
+		*f = fence_get(&ibs[num_ibs - 1].fence->base);
 	}
 
 	return 0;
 }
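The kernel-submit helper now also hands back, through its f out-parameter, a reference to the last IB's fence via the embedded base struct fence, so callers get a fence that works uniformly with the generic API either way.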
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -58,6 +58,29 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 	sync->last_vm_update = NULL;
 }
 
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+	if (a_fence)
+		return a_fence->ring->adev == adev;
+	if (s_fence)
+		return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+	return false;
+}
+
+static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->owner == owner;
+	if (a_fence)
+		return a_fence->owner == owner;
+	return false;
+}
+
 /**
  * amdgpu_sync_fence - remember to sync to this fence
  *
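The two helpers above lean on to_amdgpu_fence() and to_amd_sched_fence() returning NULL when the fence is not of the asked-for type. As a rough, self-contained illustration of that pattern (all names below are invented for the example, not the driver's actual definitions), a downcast can be validated by checking the ops table before container_of is applied:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fence_ops { const char *name; };
struct fence { const struct fence_ops *ops; unsigned context, seqno; };

static const struct fence_ops hw_ops = { "hw" };
static const struct fence_ops sched_ops = { "sched" };

struct hw_fence    { struct fence base; int ring_idx; };
struct sched_fence { struct fence base; void *owner; };

/* Downcast only succeeds when the ops table identifies the right type. */
static struct hw_fence *to_hw_fence(struct fence *f)
{
	if (f->ops != &hw_ops)
		return NULL;
	return container_of(f, struct hw_fence, base);
}

int main(void)
{
	struct sched_fence sf = { .base = { &sched_ops, 1, 1 }, .owner = NULL };
	struct hw_fence hf = { .base = { &hw_ops, 1, 2 }, .ring_idx = 0 };

	printf("sched as hw: %p\n", (void *)to_hw_fence(&sf.base)); /* NULL  */
	printf("hw as hw:    %p\n", (void *)to_hw_fence(&hf.base)); /* valid */
	return 0;
}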
@@ -71,10 +94,23 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	struct amdgpu_sync_entry *e;
 	struct amdgpu_fence *fence;
 	struct amdgpu_fence *other;
+	struct fence *tmp, *later;
 
 	if (!f)
 		return 0;
 
+	if (amdgpu_sync_same_dev(adev, f) &&
+	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
+		if (sync->last_vm_update) {
+			tmp = sync->last_vm_update;
+			BUG_ON(f->context != tmp->context);
+			later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
+			sync->last_vm_update = fence_get(later);
+			fence_put(tmp);
+		} else
+			sync->last_vm_update = fence_get(f);
+	}
+
 	fence = to_amdgpu_fence(f);
 	if (!fence || fence->ring->adev != adev) {
 		hash_for_each_possible(sync->fences, e, node, f->context) {
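The f->seqno - tmp->seqno <= INT_MAX test above is the usual wraparound-safe way to order two sequence numbers from the same fence context: in unsigned arithmetic the difference stays small exactly when f is the newer fence, even across a 32-bit wrap. A minimal standalone check of that property (assuming 32-bit seqnos; a plain C sketch, not driver code):

#include <assert.h>
#include <limits.h>
#include <stdint.h>

/* Pick the later of two seqnos from the same context, wrap-safe. */
static uint32_t later_seqno(uint32_t a, uint32_t b)
{
	return (a - b <= (uint32_t)INT_MAX) ? a : b;
}

int main(void)
{
	assert(later_seqno(10, 5) == 10);          /* plain case        */
	assert(later_seqno(5, 10) == 10);          /* order reversed    */
	assert(later_seqno(3, UINT32_MAX) == 3);   /* across wraparound */
	return 0;
}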
@@ -103,13 +139,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 			amdgpu_fence_later(fence, other));
 		amdgpu_fence_unref(&other);
 
-	if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
-		other = sync->last_vm_update;
-		sync->last_vm_update = amdgpu_fence_ref(
-			amdgpu_fence_later(fence, other));
-		amdgpu_fence_unref(&other);
-	}
-
 	return 0;
 }
@@ -296,5 +325,5 @@ void amdgpu_sync_free(struct amdgpu_device *adev,
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		amdgpu_fence_unref(&sync->sync_to[i]);
 
-	amdgpu_fence_unref(&sync->last_vm_update);
+	fence_put(sync->last_vm_update);
 }
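Note that fence_put(), like fence_get(), tolerates a NULL argument, which is why the teardown paths can drop the amdgpu_fence_unref()-style pointer clearing and simply put whatever reference is still held.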
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -200,19 +200,29 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates)
+		     struct fence *updates)
 {
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
-	struct amdgpu_fence *flushed_updates = vm_id->flushed_updates;
+	struct fence *flushed_updates = vm_id->flushed_updates;
+	bool is_earlier = false;
+
+	if (flushed_updates && updates) {
+		BUG_ON(flushed_updates->context != updates->context);
+		is_earlier = (updates->seqno - flushed_updates->seqno <=
+			      INT_MAX) ? true : false;
+	}
 
 	if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
-	    (updates && amdgpu_fence_is_earlier(flushed_updates, updates))) {
+	    is_earlier) {
 
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		vm_id->flushed_updates = amdgpu_fence_ref(
-			amdgpu_fence_later(flushed_updates, updates));
-		amdgpu_fence_unref(&flushed_updates);
+		if (is_earlier) {
+			vm_id->flushed_updates = fence_get(updates);
+			fence_put(flushed_updates);
+		}
+		if (!flushed_updates)
+			vm_id->flushed_updates = fence_get(updates);
 		vm_id->pd_gpu_addr = pd_addr;
 		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
 	}
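Since amdgpu_fence_later() no longer applies to generic fences, amdgpu_vm_flush() now does the reference juggling by hand: take a reference on the newer fence for the slot, then drop the reference the slot held on the old one. The recurring idiom, written out as a hedged sketch (fence_slot_assign() is an invented name, not a kernel function):

/* Publish new_f in *slot and release the reference the slot held. */
static void fence_slot_assign(struct fence **slot, struct fence *new_f)
{
	struct fence *old = *slot;

	*slot = fence_get(new_f);	/* fence_get() tolerates NULL */
	fence_put(old);			/* as does fence_put() */
}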
@@ -1347,7 +1357,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	fence_put(vm->page_directory_fence);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		amdgpu_fence_unref(&vm->ids[i].flushed_updates);
+		fence_put(vm->ids[i].flushed_updates);
 		amdgpu_fence_unref(&vm->ids[i].last_id_use);
 	}