drm/etnaviv: replace MMU flush marker with flush sequence

commit 4900dda90af2cb13bc1d4c12ce94b98acc8fe64e upstream.

If an MMU is shared between multiple GPUs, all of them need to flush their
TLBs, so a single marker that gets reset on the first flush won't do.
Replace the flush marker with a sequence number, so that it's possible to
check if the TLB is in sync with the current page table state for each GPU.
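
As a minimal standalone sketch of why the marker fails and the sequence
works (illustrative userspace C, not the driver code; the structs and
harness below are invented for the example):

  /*
   * Two GPUs share one MMU. A counter on the MMU is bumped on every
   * page table change; each GPU keeps the sequence number it last
   * flushed at. Unlike a single bool that the first flusher resets,
   * every GPU independently notices it is out of sync.
   */
  #include <stdbool.h>
  #include <stdio.h>

  struct mmu {
  	unsigned int flush_seq;		/* bumped on map/unmap */
  };

  struct gpu {
  	const char *name;
  	struct mmu *mmu;		/* shared with other GPUs */
  	unsigned int flush_seq;		/* last sequence flushed at */
  };

  /* Page table changed: invalidates every GPU's TLB view at once. */
  static void mmu_mark_dirty(struct mmu *mmu)
  {
  	mmu->flush_seq++;
  }

  /* On submit: flush only if this GPU has not caught up yet. */
  static void gpu_queue(struct gpu *gpu)
  {
  	unsigned int new_flush_seq = gpu->mmu->flush_seq;
  	bool need_flush = gpu->flush_seq != new_flush_seq;

  	if (need_flush) {
  		printf("%s: emitting TLB flush\n", gpu->name);
  		gpu->flush_seq = new_flush_seq;
  	} else {
  		printf("%s: TLB in sync, no flush\n", gpu->name);
  	}
  }

  int main(void)
  {
  	struct mmu mmu = { 0 };
  	struct gpu gpu0 = { "gpu0", &mmu, 0 };
  	struct gpu gpu1 = { "gpu1", &mmu, 0 };

  	mmu_mark_dirty(&mmu);	/* e.g. a new mapping is added */
  	gpu_queue(&gpu0);	/* flushes */
  	gpu_queue(&gpu1);	/* also flushes; a shared bool reset
  				 * by gpu0 would have skipped this */
  	gpu_queue(&gpu0);	/* in sync, nothing to do */
  	return 0;
  }

In the driver the shared counter is sampled once with READ_ONCE() and that
same snapshot is written back after the flush, so a concurrent increment is
never lost; the sketch above elides that detail.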

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c

@@ -258,6 +258,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	unsigned int waitlink_offset = buffer->user_size - 16;
 	u32 return_target, return_dwords;
 	u32 link_target, link_dwords;
+	unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
+	bool need_flush = gpu->flush_seq != new_flush_seq;
 
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
@@ -270,14 +272,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	 * need to append a mmu flush load state, followed by a new
 	 * link to this buffer - a total of four additional words.
 	 */
-	if (gpu->mmu->need_flush || gpu->switch_context) {
+	if (need_flush || gpu->switch_context) {
 		u32 target, extra_dwords;
 
 		/* link command */
 		extra_dwords = 1;
 
 		/* flush command */
-		if (gpu->mmu->need_flush) {
+		if (need_flush) {
 			if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
 				extra_dwords += 1;
 			else
@@ -290,7 +292,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 
 		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
 
-		if (gpu->mmu->need_flush) {
+		if (need_flush) {
 			/* Add the MMU flush */
 			if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
 				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
@@ -310,7 +312,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 					  SYNC_RECIPIENT_PE);
 			}
 
-			gpu->mmu->need_flush = false;
+			gpu->flush_seq = new_flush_seq;
 		}
 
 		if (gpu->switch_context) {

--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -1353,7 +1353,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	gpu->active_fence = submit->fence->seqno;
 
 	if (gpu->lastctx != cmdbuf->ctx) {
-		gpu->mmu->need_flush = true;
+		gpu->mmu->flush_seq++;
 		gpu->switch_context = true;
 		gpu->lastctx = cmdbuf->ctx;
 	}

--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h

@@ -138,6 +138,7 @@ struct etnaviv_gpu {
 	struct etnaviv_iommu *mmu;
 	struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
+	unsigned int flush_seq;
 
 	/* Power Control: */
 	struct clk *clk_bus;

--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c

@@ -132,7 +132,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		 */
 		if (mmu->last_iova) {
 			mmu->last_iova = 0;
-			mmu->need_flush = true;
+			mmu->flush_seq++;
 			continue;
 		}
@@ -246,7 +246,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 	}
 
 	list_add_tail(&mapping->mmu_node, &mmu->mappings);
-	mmu->need_flush = true;
+	mmu->flush_seq++;
 	mutex_unlock(&mmu->lock);
 
 	return ret;
@@ -264,7 +264,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
 		etnaviv_iommu_remove_mapping(mmu, mapping);
 
 	list_del(&mapping->mmu_node);
-	mmu->need_flush = true;
+	mmu->flush_seq++;
 	mutex_unlock(&mmu->lock);
 }
@@ -346,7 +346,7 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
 			return ret;
 		}
 		mmu->last_iova = vram_node->start + size;
-		gpu->mmu->need_flush = true;
+		mmu->flush_seq++;
 		mutex_unlock(&mmu->lock);
 
 		*iova = (u32)vram_node->start;

--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h

@@ -44,7 +44,7 @@ struct etnaviv_iommu {
 	struct list_head mappings;
 	struct drm_mm mm;
 	u32 last_iova;
-	bool need_flush;
+	unsigned int flush_seq;
 };
 
 struct etnaviv_gem_object;