drm/amdgpu: add a fence after the VM flush
This way we can track when the flush is done.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 832a902f94
commit 41d9eb2c5a
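For context, here is a minimal standalone C sketch of the pattern the hunks below introduce (VM id bookkeeping, the IB scheduling path and the flush function itself): emit a fence right behind the VM flush, remember it as last_flush under the manager lock, and return errors to the caller instead of returning void. All names prefixed with model_ are hypothetical stand-ins, not the amdgpu code.

/*
 * Minimal model of "add a fence after the VM flush" with assumed names;
 * the real logic lives in amdgpu_vm_flush()/amdgpu_vm_grab_id().
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct model_fence {
        unsigned int seq;       /* sequence number "emitted" on the ring */
        int refcount;
};

struct model_vm_id {
        struct model_fence *last_flush; /* fence behind the last VM flush */
};

struct model_vm_manager {
        pthread_mutex_t lock;
        unsigned int next_seq;
};

static void model_fence_put(struct model_fence *f)
{
        if (f && --f->refcount == 0)
                free(f);
}

/* "Emit" a fence on the ring; can fail, like the kernel's fence emit path. */
static int model_fence_emit(struct model_vm_manager *mgr,
                            struct model_fence **out)
{
        struct model_fence *f = calloc(1, sizeof(*f));

        if (!f)
                return -1;      /* would be -ENOMEM in the kernel */
        f->refcount = 1;
        f->seq = ++mgr->next_seq;
        *out = f;
        return 0;
}

/* Mirrors the new shape of the flush path: void -> int. */
static int model_vm_flush(struct model_vm_manager *mgr,
                          struct model_vm_id *id, unsigned long pd_addr)
{
        struct model_fence *fence;
        int r;

        printf("emit VM flush for page directory 0x%lx\n", pd_addr);

        r = model_fence_emit(mgr, &fence);      /* fence right after the flush */
        if (r)
                return r;

        pthread_mutex_lock(&mgr->lock);
        model_fence_put(id->last_flush);        /* drop the previously tracked fence */
        id->last_flush = fence;                 /* track the new one */
        pthread_mutex_unlock(&mgr->lock);
        return 0;
}

int main(void)
{
        struct model_vm_manager mgr = { .next_seq = 0 };
        struct model_vm_id id = { .last_flush = NULL };

        pthread_mutex_init(&mgr.lock, NULL);

        /* Callers now have to check the result, as the IB scheduling hunk does. */
        if (model_vm_flush(&mgr, &id, 0x100000))
                return 1;

        printf("flush is tracked by fence seq %u\n", id.last_flush->seq);
        model_fence_put(id.last_flush);
        return 0;
}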
@@ -880,6 +880,7 @@ struct amdgpu_vm_id {
         struct list_head        list;
         struct fence            *first;
         struct amdgpu_sync      active;
+        struct fence            *last_flush;
         atomic_long_t           owner;
 
         uint64_t                pd_gpu_addr;
@@ -926,7 +927,7 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                       struct amdgpu_sync *sync, struct fence *fence,
                       unsigned *vm_id, uint64_t *vm_pd_addr);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
                      unsigned vm_id, uint64_t pd_addr,
                      uint32_t gds_base, uint32_t gds_size,
                      uint32_t gws_base, uint32_t gws_size,
@@ -155,10 +155,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
         if (vm) {
                 /* do context switch */
-                amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
-                                ib->gds_base, ib->gds_size,
-                                ib->gws_base, ib->gws_size,
-                                ib->oa_base, ib->oa_size);
+                r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
+                                    ib->gds_base, ib->gds_size,
+                                    ib->gws_base, ib->gws_size,
+                                    ib->oa_base, ib->oa_size);
+                if (r) {
+                        amdgpu_ring_undo(ring);
+                        return r;
+                }
 
                 if (ring->funcs->emit_hdp_flush)
                         amdgpu_ring_emit_hdp_flush(ring);
@@ -236,6 +236,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                 fence_put(id->first);
                 id->first = fence_get(fence);
 
+                fence_put(id->last_flush);
+                id->last_flush = NULL;
+
                 fence_put(id->flushed_updates);
                 id->flushed_updates = fence_get(updates);
 
@@ -263,7 +266,7 @@ error:
  *
  * Emit a VM flush when it is necessary.
  */
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
                      unsigned vm_id, uint64_t pd_addr,
                      uint32_t gds_base, uint32_t gds_size,
                      uint32_t gws_base, uint32_t gws_size,
@@ -278,14 +281,25 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
                 id->gws_size != gws_size ||
                 id->oa_base != oa_base ||
                 id->oa_size != oa_size);
+        int r;
 
         if (ring->funcs->emit_pipeline_sync && (
             pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
                 amdgpu_ring_emit_pipeline_sync(ring);
 
         if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+                struct fence *fence;
+
                 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
                 amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+                r = amdgpu_fence_emit(ring, &fence);
+                if (r)
+                        return r;
+
+                mutex_lock(&adev->vm_manager.lock);
+                fence_put(id->last_flush);
+                id->last_flush = fence;
+                mutex_unlock(&adev->vm_manager.lock);
         }
 
         if (gds_switch_needed) {
@@ -300,6 +314,8 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
                                             gws_base, gws_size,
                                             oa_base, oa_size);
         }
+
+        return 0;
 }
 
 /**
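The consumer of the tracked fence is not part of this patch; presumably later code can look at id->last_flush (with a fence_wait() in the real driver) to learn when the flush has actually completed, for example before reusing the VMID. Continuing the hypothetical model above, such a consumer could look like this:

/*
 * Hypothetical consumer of the tracked fence, continuing the model above.
 * In the real driver this would take a reference to id->last_flush and
 * wait on it; the model fence only records a sequence number.
 */
static int model_wait_for_last_flush(struct model_vm_manager *mgr,
                                     struct model_vm_id *id)
{
        struct model_fence *f;

        pthread_mutex_lock(&mgr->lock);
        f = id->last_flush;
        pthread_mutex_unlock(&mgr->lock);

        if (!f)
                return 0;       /* nothing emitted yet, nothing to wait for */

        /* model stand-in for waiting until the fence signals */
        printf("waiting for VM flush fence seq %u\n", f->seq);
        return 0;
}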