// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000
#define MMU_PT_ADDR_MASK			GENMASK(31, 7)

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL_GEN1			0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_MISC_CTRL			0x048
#define F_MMU_IN_ORDER_WR_EN_MASK		(BIT(1) | BIT(17))
#define F_MMU_STANDARD_AXI_MODE_MASK		(BIT(3) | BIT(19))

#define REG_MMU_DCM_DIS				0x050
#define REG_MMU_WR_LEN_CTRL			0x054
#define F_MMU_WR_THROT_DIS_MASK			(BIT(5) | BIT(21))

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
						/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134
#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)

#define REG_MMU0_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU0_INVLD_PA			0x140
#define REG_MMU1_FAULT_VA			0x144
#define REG_MMU1_INVLD_PA			0x148
#define REG_MMU0_INT_ID				0x150
#define REG_MMU1_INT_ID				0x154
#define F_MMU_INT_ID_COMM_ID(a)			(((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a)		(((a) >> 7) & 0x3)
#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN			256

/*
 * Get the local arbiter ID and the portid within the larb arbiter
 * from mtk_m4u_id which is defined by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)		((id) & 0x1f)

#define HAS_4GB_MODE			BIT(0)
/* HW will use the EMI clock if there isn't the "bclk". */
#define HAS_BCLK			BIT(1)
#define HAS_VLD_PA_RNG			BIT(2)
#define RESET_AXI			BIT(3)
#define OUT_ORDER_WR_EN			BIT(4)
#define HAS_SUB_COMM			BIT(5)
#define WR_THROT_EN			BIT(6)

#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
		((((pdata)->flags) & (_x)) == (_x))

struct mtk_iommu_domain {
	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};

static const struct iommu_ops mtk_iommu_ops;

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0      1G       2G     3G       4G     5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 *  =============================
 *
 *                                 4G      5G     6G      7G      8G
 *                                 |---E---|---B---|---C---|---D---|
 *                                 +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by M4U; for regions 'B'/'C'/'D', bit 32
 * of the CPU physical address always needs to be set, while for region 'E'
 * the CPU physical address is kept as is.
 * Additionally, the iommu consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL

static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)

/*
 * There may be 1 or 2 M4U HWs, but we always expect they are in the same
 * domain for performance reasons.
 *
 * Here always return the mtk_iommu_data of the first probed M4U where the
 * iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
	struct mtk_iommu_data *data;

	for_each_m4u(data)
		return data;

	return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

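/*
 * Invalidate the whole TLB on every M4U HW. The wmb() after the relaxed
 * writes ensures the invalidate command has reached the HW before we return.
 */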
static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + data->plat_data->inv_sel_reg);
		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
		wmb(); /* Make sure the tlb flush all done */
	}
}

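/*
 * Invalidate the TLB for the range [iova, iova + size - 1] on every M4U HW,
 * then poll REG_MMU_CPE_DONE for completion; on timeout, fall back to a full
 * flush. The tlb_lock serializes this invalidate-and-poll sequence.
 */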
static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
					   size_t granule, void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	unsigned long flags;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		spin_lock_irqsave(&data->tlb_lock, flags);
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + data->plat_data->inv_sel_reg);

		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
		writel_relaxed(iova + size - 1,
			       data->base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE,
			       data->base + REG_MMU_INVALIDATE);

		/* tlb sync */
		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 1000);
		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(cookie);
		}
		/* Clear the CPE status */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
		spin_unlock_irqrestore(&data->tlb_lock, flags);
	}
}

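/*
 * Defer the per-page invalidation: only record the page in the gather
 * window here; the actual flush is issued later in mtk_iommu_iotlb_sync().
 */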
static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
					    unsigned long iova, size_t granule,
					    void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	struct iommu_domain *domain = &data->m4u_dom->domain;

	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}

static const struct iommu_flush_ops mtk_iommu_flush_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
	.tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
	.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};

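/*
 * Translation fault ISR: read the faulting IOVA/PA and the larb/port decoded
 * from the INT_ID register (via the sub-common field on HWs that have one),
 * report the fault, then clear the interrupt and flush the whole TLB.
 */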
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port, sub_comm = 0;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
	}
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_port = F_MMU_INT_ID_PORT_ID(regval);
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
		fault_larb = F_MMU_INT_ID_COMM_ID(regval);
		sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
	} else {
		fault_larb = F_MMU_INT_ID_LARB_ID(regval);
	}
	fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

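/*
 * Enable or disable the IOMMU for all master ports of @dev by updating the
 * per-larb mmu enable bits that are consumed by the mtk-smi larb driver.
 */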
static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu    *larb_mmu;
	unsigned int                 larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

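/*
 * Set up the ARM short-descriptor (v7s) pagetable for this domain: 32-bit
 * input addresses and 34-bit output addresses via the ARM_MTK_EXT quirk,
 * with all TLB maintenance routed through mtk_iommu_flush_ops.
 */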
static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP |
			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 34,
		.tlb = &mtk_iommu_flush_ops,
		.iommu_dev = data->dev,
	};

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our support page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain))
		goto free_dom;

	if (mtk_iommu_domain_finalise(dom))
		goto put_dma_cookie;

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;

put_dma_cookie:
	iommu_put_dma_cookie(&dom->domain);
free_dom:
	kfree(dom);
	return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	free_io_pgtable_ops(dom->iop);
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	if (!data)
		return -ENODEV;

	/* Update the pgtable base address register of the M4U HW */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
		       data->base + REG_MMU_PT_BASE_ADDR);
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}

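/*
 * In 4GB mode, bit 32 is set on the physical address before mapping (see
 * the remapping diagram above). Due to IO_PGTABLE_QUIRK_TLBI_ON_MAP,
 * io-pgtable also flushes the TLB on map through tlb_flush_walk, while
 * unmap defers its flush to mtk_iommu_iotlb_sync() via the gather.
 */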
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
	if (data->enable_4GB)
		paddr |= BIT_ULL(32);

	/* Synchronize with the tlb_lock */
	return dom->iop->map(dom->iop, iova, paddr, size, prot);
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	return dom->iop->unmap(dom->iop, iova, size, gather);
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	size_t length = gather->end - gather->start;

	if (gather->start == ULONG_MAX)
		return;

	mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
				       data);
}

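/*
 * Translate an IOVA back to the CPU physical address, undoing the bit-32
 * remap for output addresses above MTK_IOMMU_4GB_MODE_REMAP_BASE when 4GB
 * mode is enabled (see the diagram above).
 */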
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	phys_addr_t pa;

	pa = dom->iop->iova_to_phys(dom->iop, iova);
	if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);

	return pa;
}

static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return ERR_PTR(-ENODEV); /* Not an iommu client device */

	data = dev_iommu_priv_get(dev);

	return &data->iommu;
}

static void mtk_iommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return;

	iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	if (!data)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	} else {
		iommu_group_ref_get(data->m4u_group);
	}
	return data->m4u_group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
	.iotlb_sync	= mtk_iommu_iotlb_sync,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.probe_device	= mtk_iommu_probe_device,
	.release_device	= mtk_iommu_release_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

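/*
 * Program the static HW configuration: translation-fault behaviour, the set
 * of fault interrupts to enable, the protect-buffer address used on faults,
 * the per-SoC AXI/write-throttling quirks, and finally request the IRQ.
 */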
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
	else
		regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

	if (data->enable_4GB &&
	    MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
		/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff. Record bit[32:30] here.
		 */
		regval = F_MMU_VLD_PA_RNG(7, 4);
		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
	}
	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
		/* write command throttling mode */
		regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
		regval &= ~F_MMU_WR_THROT_DIS_MASK;
		writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
	}

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
		/* The register is called STANDARD_AXI_MODE in this case */
		regval = 0;
	} else {
		regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
		regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
		if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
			regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
	}
	writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

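/*
 * Probe: allocate the protect buffer, map the MMIO/IRQ/clock resources,
 * turn every "mediatek,larbs" phandle into a component match so the SMI
 * larbs bind against this M4U, then init the HW and register the IOMMU.
 */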
static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data   *data;
	struct device           *dev = &pdev->dev;
	struct resource         *res;
	resource_size_t		ioaddr;
	struct component_match  *match = NULL;
	void                    *protect;
	int                     i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

	/* Protect memory: the HW accesses this buffer on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	/* Whether the current DRAM is over 4GB */
	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
	if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
		data->enable_4GB = false;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);
	ioaddr = res->start;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
		data->bclk = devm_clk_get(dev, "bclk");
		if (IS_ERR(data->bclk))
			return PTR_ERR(data->bclk);
	}

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;
		u32 id;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret) /* The ids are consecutive if this property is absent */
			id = i;

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[id].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, release_of,
					    compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	spin_lock_init(&data->tlb_lock);
	list_add_tail(&data->list, &m4ulist);

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

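/*
 * Suspend saves the HW configuration registers and gates bclk; resume
 * restores them and rewrites the pagetable base if a domain was already
 * attached.
 */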
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
	reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
	clk_disable_unprepare(data->bclk);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
	void __iomem *base = data->base;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
		return ret;
	}
	writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
	writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
	if (m4u_dom)
		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
		       base + REG_MMU_PT_BASE_ADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

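/*
 * Per-SoC match data: which quirk flags apply, which generation of the
 * invalidate-select register the HW uses, and how the larb IDs decoded from
 * a fault's INT_ID map onto the SoC's larb numbering.
 */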
static const struct mtk_iommu_plat_data mt2712_data = {
	.m4u_plat     = M4U_MT2712,
	.flags        = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
	.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
};

static const struct mtk_iommu_plat_data mt8173_data = {
	.m4u_plat     = M4U_MT8173,
	.flags	      = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
	.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};

static const struct mtk_iommu_plat_data mt8183_data = {
	.m4u_plat     = M4U_MT8183,
	.flags        = RESET_AXI,
	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
	.larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
	{}
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = of_match_ptr(mtk_iommu_of_ids),
		.pm = &mtk_iommu_pm_ops,
	}
};

static int __init mtk_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register MTK IOMMU driver\n");

	return ret;
}

subsys_initcall(mtk_iommu_init)