// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
#include <linux/dma-mapping.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
#include "adreno/a2xx.xml.h"
struct msm_gpummu {
	struct msm_mmu base;
	struct msm_gpu *gpu;
	dma_addr_t pt_base;
	uint32_t *table;
};
#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
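/*
 * The gpummu uses a single flat page table: one 32-bit entry per 4K page,
 * covering GPUMMU_VA_RANGE of GPU virtual address space starting at
 * GPUMMU_VA_START.
 */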
#define GPUMMU_VA_START SZ_16M
#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}
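/*
 * Write one page table entry per 4K page of the sg_table: the page's DMA
 * address ORed with the read/write permission bits, followed by a TLB
 * invalidate so the GPU sees the new mapping.
 */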
static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
	struct sg_dma_page_iter dma_iter;
	unsigned prot_bits = 0;

	if (prot & IOMMU_WRITE)
		prot_bits |= 1;
	if (prot & IOMMU_READ)
		prot_bits |= 2;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
		int i;

		for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
			gpummu->table[idx++] = (addr + i) | prot_bits;
	}

	/* we can improve by deferring flush for multiple map() */
	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
	return 0;
}
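/*
 * Clear the page table entries covering [iova, iova + len) and invalidate
 * the TLB.
 */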
static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
	unsigned i;

	for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
		gpummu->table[idx] = 0;

	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
	return 0;
}
static void msm_gpummu_resume_translation(struct msm_mmu *mmu)
{
}
static void msm_gpummu_destroy(struct msm_mmu *mmu)
{
	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);

	dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
		DMA_ATTR_FORCE_CONTIGUOUS);

	kfree(gpummu);
}
static const struct msm_mmu_funcs funcs = {
		.detach = msm_gpummu_detach,
		.map = msm_gpummu_map,
		.unmap = msm_gpummu_unmap,
		.destroy = msm_gpummu_destroy,
		.resume_translation = msm_gpummu_resume_translation,
};
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
{
	struct msm_gpummu *gpummu;

	gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
	if (!gpummu)
		return ERR_PTR(-ENOMEM);

	/* Allocate 32 extra bytes past the table; msm_gpummu_params() hands
	 * that region out as the translation-error scratch address.
	 */
	gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
		GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
	if (!gpummu->table) {
		kfree(gpummu);
		return ERR_PTR(-ENOMEM);
	}

	gpummu->gpu = gpu;
	msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);

	return &gpummu->base;
}
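/*
 * Report the DMA addresses needed to program the MMU: the page table base
 * and the translation-error address located in the 32 bytes allocated past
 * the end of the table.
 */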
void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
		dma_addr_t *tran_error)
{
	dma_addr_t base = to_msm_gpummu(mmu)->pt_base;

	*pt_base = base;
	*tran_error = base + TABLE_SIZE; /* 32-byte aligned */
}