drm/etnaviv: replace MMU flush marker with flush sequence

If an MMU is shared between multiple GPUs, all of them need to flush their
TLBs, so a single marker that gets reset on the first flush won't do.
Replace the flush marker with a sequence number, so that it's possible to
check if the TLB is in sync with the current page table state for each GPU.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
This commit is contained in:
parent
bffe5db81a
commit
4900dda90a
@@ -315,6 +315,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	u32 return_target, return_dwords;
 	u32 link_target, link_dwords;
 	bool switch_context = gpu->exec_state != exec_state;
+	unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
+	bool need_flush = gpu->flush_seq != new_flush_seq;

 	lockdep_assert_held(&gpu->lock);

@@ -329,14 +331,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	 * need to append a mmu flush load state, followed by a new
 	 * link to this buffer - a total of four additional words.
 	 */
-	if (gpu->mmu->need_flush || switch_context) {
+	if (need_flush || switch_context) {
 		u32 target, extra_dwords;

 		/* link command */
 		extra_dwords = 1;

 		/* flush command */
-		if (gpu->mmu->need_flush) {
+		if (need_flush) {
 			if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
 				extra_dwords += 1;
 			else
@@ -349,7 +351,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,

 		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

-		if (gpu->mmu->need_flush) {
+		if (need_flush) {
 			/* Add the MMU flush */
 			if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
 				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
@@ -369,7 +371,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 					SYNC_RECIPIENT_PE);
 		}

-		gpu->mmu->need_flush = false;
+		gpu->flush_seq = new_flush_seq;
 	}

 	if (switch_context) {
|
@ -137,6 +137,7 @@ struct etnaviv_gpu {
|
||||
int irq;
|
||||
|
||||
struct etnaviv_iommu *mmu;
|
||||
unsigned int flush_seq;
|
||||
|
||||
/* Power Control: */
|
||||
struct clk *clk_bus;
|
||||
|
@ -263,7 +263,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
|
||||
}
|
||||
|
||||
list_add_tail(&mapping->mmu_node, &mmu->mappings);
|
||||
mmu->need_flush = true;
|
||||
mmu->flush_seq++;
|
||||
unlock:
|
||||
mutex_unlock(&mmu->lock);
|
||||
|
||||
@@ -282,7 +282,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
 	etnaviv_iommu_remove_mapping(mmu, mapping);

 	list_del(&mapping->mmu_node);
-	mmu->need_flush = true;
+	mmu->flush_seq++;
 	mutex_unlock(&mmu->lock);
 }

@@ -369,7 +369,7 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
 			return ret;
 		}

-		mmu->need_flush = true;
+		mmu->flush_seq++;
 	}

 	list_add_tail(&mapping->mmu_node, &mmu->mappings);
@@ -48,7 +48,7 @@ struct etnaviv_iommu {
 	struct mutex lock;
 	struct list_head mappings;
 	struct drm_mm mm;
-	bool need_flush;
+	unsigned int flush_seq;
 };

 struct etnaviv_gem_object;
Loading…
x
Reference in New Issue
Block a user