drm/etnaviv: map cmdbuf through MMU on version 2
With MMUv2, all buffers need to be mapped through the MMU once it is
enabled. Align the buffer size to 4K, as the MMU can only map
page-aligned buffers.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
commit e68f270f21
parent 90969c9aa9
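The constraint behind the first hunk is that an IOMMU maps whole pages, so the
cmdbuf size must be rounded up to a 4K multiple before being handed to
iommu_map(). A minimal user-space sketch of the rounding that the
ALIGN(size, SZ_4K) line performs (ALIGN_UP here is a hypothetical stand-in
for the kernel macro, not the kernel's actual definition):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical user-space stand-in for the kernel's ALIGN()/SZ_4K:
     * round a size up to the next multiple of a power-of-two alignment. */
    #define SZ_4K 0x1000u
    #define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
            uint32_t sizes[] = { 1, 4096, 4097, 12288 };
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("%u -> %u\n", sizes[i], ALIGN_UP(sizes[i], SZ_4K));
            /* prints: 1 -> 4096, 4096 -> 4096, 4097 -> 8192, 12288 -> 12288 */
            return 0;
    }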
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1112,6 +1112,9 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
 	if (!cmdbuf)
 		return NULL;
 
+	if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
+		size = ALIGN(size, SZ_4K);
+
 	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
 				     GFP_KERNEL);
 	if (!cmdbuf->vaddr) {
@@ -1127,6 +1130,7 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
 
 void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
 {
+	etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
 	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
 		    cmdbuf->paddr);
 	kfree(cmdbuf);
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -160,6 +160,8 @@ struct etnaviv_cmdbuf {
 	dma_addr_t paddr;
 	u32 size;
 	u32 user_size;
+	/* vram node used if the cmdbuf is mapped through the MMUv2 */
+	struct drm_mm_node vram_node;
 	/* fence after which this buffer is to be disposed */
 	struct fence *fence;
 	/* target exec state */
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -319,9 +319,49 @@ void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
 u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
 				struct etnaviv_cmdbuf *buf)
 {
-	return buf->paddr - gpu->memory_base;
+	struct etnaviv_iommu *mmu = gpu->mmu;
+
+	if (mmu->version == ETNAVIV_IOMMU_V1) {
+		return buf->paddr - gpu->memory_base;
+	} else {
+		int ret;
+
+		if (buf->vram_node.allocated)
+			return (u32)buf->vram_node.start;
+
+		mutex_lock(&mmu->lock);
+		ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+		if (ret < 0) {
+			mutex_unlock(&mmu->lock);
+			return 0;
+		}
+		ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
+				buf->size, IOMMU_READ);
+		if (ret < 0) {
+			drm_mm_remove_node(&buf->vram_node);
+			mutex_unlock(&mmu->lock);
+			return 0;
+		}
+		mmu->last_iova = buf->vram_node.start + buf->size;
+		gpu->mmu->need_flush = true;
+		mutex_unlock(&mmu->lock);
+
+		return (u32)buf->vram_node.start;
+	}
+}
+
+void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
+				 struct etnaviv_cmdbuf *buf)
+{
+	struct etnaviv_iommu *mmu = gpu->mmu;
+
+	if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
+		mutex_lock(&mmu->lock);
+		iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
+		drm_mm_remove_node(&buf->vram_node);
+		mutex_unlock(&mmu->lock);
+	}
 }
 
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
 {
 	struct etnaviv_iommu_ops *ops;
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -64,6 +64,8 @@ void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
 
 u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
 				struct etnaviv_cmdbuf *buf);
+void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
+				 struct etnaviv_cmdbuf *buf);
 
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
 void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);