// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for MTK architected m4u v1 implementations
 *
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on driver/iommu/mtk_iommu.c
 */
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
/* Register map and field definitions of the MTK generation one (m4u v1) IOMMU. */
#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
/* Decode larb/port indexes out of the REG_MMU_INT_ID translation-fault ID. */
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
/* MTK generation one iommu HW only support 4K size mapping */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)
#define MT2701_LARB_NR_MAX			3

/*
 * MTK m4u support 4GB iova address space, and only support 4K page
 * mapping. So the pagetable size should be exactly as 4M.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_suspend_reg {
2022-05-03 10:14:18 +03:00
u32 standard_axi_mode ;
u32 dcm_dis ;
u32 ctrl_reg ;
u32 int_control0 ;
} ;
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data {
2022-05-03 10:14:17 +03:00
void __iomem * base ;
int irq ;
struct device * dev ;
struct clk * bclk ;
phys_addr_t protect_base ; /* protect memory base */
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_domain * m4u_dom ;
2022-05-03 10:14:17 +03:00
struct iommu_device iommu ;
struct dma_iommu_mapping * mapping ;
struct mtk_smi_larb_iommu larb_imu [ MTK_LARB_NR_MAX ] ;
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_suspend_reg reg ;
2022-05-03 10:14:17 +03:00
} ;
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_domain {
2016-06-08 12:51:00 +03:00
spinlock_t pgtlock ; /* lock for page table */
struct iommu_domain domain ;
u32 * pgt_va ;
dma_addr_t pgt_pa ;
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data ;
2016-06-08 12:51:00 +03:00
} ;
2022-05-03 10:14:19 +03:00
static int mtk_iommu_v1_bind ( struct device * dev )
2022-05-03 10:14:17 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data = dev_get_drvdata ( dev ) ;
2022-05-03 10:14:17 +03:00
return component_bind_all ( dev , & data - > larb_imu ) ;
}
2022-05-03 10:14:19 +03:00
static void mtk_iommu_v1_unbind ( struct device * dev )
2022-05-03 10:14:17 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data = dev_get_drvdata ( dev ) ;
2022-05-03 10:14:17 +03:00
component_unbind_all ( dev , & data - > larb_imu ) ;
}
2022-05-03 10:14:19 +03:00
static struct mtk_iommu_v1_domain * to_mtk_domain ( struct iommu_domain * dom )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
return container_of ( dom , struct mtk_iommu_v1_domain , domain ) ;
2016-06-08 12:51:00 +03:00
}
/* First global port id of each larb; ids in [larb[i], larb[i+1]) belong to larb i. */
static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

/* Map a global m4u port id to its larb index (highest offset <= id wins). */
static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if ((id) >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

/* Map a global m4u port id to its port index within the owning larb. */
static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}
2022-05-03 10:14:19 +03:00
static void mtk_iommu_v1_tlb_flush_all ( struct mtk_iommu_v1_data * data )
2016-06-08 12:51:00 +03:00
{
writel_relaxed ( F_INVLD_EN1 | F_INVLD_EN0 ,
data - > base + REG_MMU_INV_SEL ) ;
writel_relaxed ( F_ALL_INVLD , data - > base + REG_MMU_INVALIDATE ) ;
wmb ( ) ; /* Make sure the tlb flush all done */
}
2022-05-03 10:14:19 +03:00
static void mtk_iommu_v1_tlb_flush_range ( struct mtk_iommu_v1_data * data ,
unsigned long iova , size_t size )
2016-06-08 12:51:00 +03:00
{
int ret ;
u32 tmp ;
writel_relaxed ( F_INVLD_EN1 | F_INVLD_EN0 ,
data - > base + REG_MMU_INV_SEL ) ;
writel_relaxed ( iova & F_MMU_FAULT_VA_MSK ,
data - > base + REG_MMU_INVLD_START_A ) ;
writel_relaxed ( ( iova + size - 1 ) & F_MMU_FAULT_VA_MSK ,
data - > base + REG_MMU_INVLD_END_A ) ;
writel_relaxed ( F_MMU_INV_RANGE , data - > base + REG_MMU_INVALIDATE ) ;
ret = readl_poll_timeout_atomic ( data - > base + REG_MMU_CPE_DONE ,
tmp , tmp ! = 0 , 10 , 100000 ) ;
if ( ret ) {
dev_warn ( data - > dev ,
" Partial TLB flush timed out, falling back to full flush \n " ) ;
2022-05-03 10:14:19 +03:00
mtk_iommu_v1_tlb_flush_all ( data ) ;
2016-06-08 12:51:00 +03:00
}
/* Clear the CPE status */
writel_relaxed ( 0 , data - > base + REG_MMU_CPE_DONE ) ;
}
2022-05-03 10:14:19 +03:00
static irqreturn_t mtk_iommu_v1_isr ( int irq , void * dev_id )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data = dev_id ;
struct mtk_iommu_v1_domain * dom = data - > m4u_dom ;
2016-06-08 12:51:00 +03:00
u32 int_state , regval , fault_iova , fault_pa ;
unsigned int fault_larb , fault_port ;
/* Read error information from registers */
int_state = readl_relaxed ( data - > base + REG_MMU_FAULT_ST ) ;
fault_iova = readl_relaxed ( data - > base + REG_MMU_FAULT_VA ) ;
fault_iova & = F_MMU_FAULT_VA_MSK ;
fault_pa = readl_relaxed ( data - > base + REG_MMU_INVLD_PA ) ;
regval = readl_relaxed ( data - > base + REG_MMU_INT_ID ) ;
fault_larb = MT2701_M4U_TF_LARB ( regval ) ;
fault_port = MT2701_M4U_TF_PORT ( regval ) ;
/*
* MTK v1 iommu HW could not determine whether the fault is read or
* write fault , report as read fault .
*/
if ( report_iommu_fault ( & dom - > domain , data - > dev , fault_iova ,
IOMMU_FAULT_READ ) )
dev_err_ratelimited ( data - > dev ,
" fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d \n " ,
int_state , fault_iova , fault_pa ,
fault_larb , fault_port ) ;
/* Interrupt clear */
regval = readl_relaxed ( data - > base + REG_MMU_INT_CONTROL ) ;
regval | = F_INT_CLR_BIT ;
writel_relaxed ( regval , data - > base + REG_MMU_INT_CONTROL ) ;
2022-05-03 10:14:19 +03:00
mtk_iommu_v1_tlb_flush_all ( data ) ;
2016-06-08 12:51:00 +03:00
return IRQ_HANDLED ;
}
2022-05-03 10:14:19 +03:00
static void mtk_iommu_v1_config ( struct mtk_iommu_v1_data * data ,
struct device * dev , bool enable )
2016-06-08 12:51:00 +03:00
{
struct mtk_smi_larb_iommu * larb_mmu ;
unsigned int larbid , portid ;
2018-11-29 16:01:00 +03:00
struct iommu_fwspec * fwspec = dev_iommu_fwspec_get ( dev ) ;
2016-10-17 14:49:21 +03:00
int i ;
2016-06-08 12:51:00 +03:00
2016-10-17 14:49:21 +03:00
for ( i = 0 ; i < fwspec - > num_ids ; + + i ) {
larbid = mt2701_m4u_to_larb ( fwspec - > ids [ i ] ) ;
portid = mt2701_m4u_to_port ( fwspec - > ids [ i ] ) ;
2019-08-24 06:02:08 +03:00
larb_mmu = & data - > larb_imu [ larbid ] ;
2016-06-08 12:51:00 +03:00
dev_dbg ( dev , " %s iommu port: %d \n " ,
enable ? " enable " : " disable " , portid ) ;
if ( enable )
larb_mmu - > mmu | = MTK_SMI_MMU_EN ( portid ) ;
else
larb_mmu - > mmu & = ~ MTK_SMI_MMU_EN ( portid ) ;
}
}
2022-05-03 10:14:19 +03:00
static int mtk_iommu_v1_domain_finalise ( struct mtk_iommu_v1_data * data )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_domain * dom = data - > m4u_dom ;
2016-06-08 12:51:00 +03:00
spin_lock_init ( & dom - > pgtlock ) ;
cross-tree: phase out dma_zalloc_coherent()
We already need to zero out memory for dma_alloc_coherent(), as such
using dma_zalloc_coherent() is superflous. Phase it out.
This change was generated with the following Coccinelle SmPL patch:
@ replace_dma_zalloc_coherent @
expression dev, size, data, handle, flags;
@@
-dma_zalloc_coherent(dev, size, handle, flags)
+dma_alloc_coherent(dev, size, handle, flags)
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
[hch: re-ran the script on the latest tree]
Signed-off-by: Christoph Hellwig <hch@lst.de>
2019-01-04 11:23:09 +03:00
dom - > pgt_va = dma_alloc_coherent ( data - > dev , M2701_IOMMU_PGT_SIZE ,
& dom - > pgt_pa , GFP_KERNEL ) ;
2016-06-08 12:51:00 +03:00
if ( ! dom - > pgt_va )
return - ENOMEM ;
writel ( dom - > pgt_pa , data - > base + REG_MMU_PT_BASE_ADDR ) ;
dom - > data = data ;
return 0 ;
}
2022-05-03 10:14:19 +03:00
static struct iommu_domain * mtk_iommu_v1_domain_alloc ( unsigned type )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_domain * dom ;
2016-06-08 12:51:00 +03:00
if ( type ! = IOMMU_DOMAIN_UNMANAGED )
return NULL ;
dom = kzalloc ( sizeof ( * dom ) , GFP_KERNEL ) ;
if ( ! dom )
return NULL ;
return & dom - > domain ;
}
2022-05-03 10:14:19 +03:00
static void mtk_iommu_v1_domain_free ( struct iommu_domain * domain )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_domain * dom = to_mtk_domain ( domain ) ;
struct mtk_iommu_v1_data * data = dom - > data ;
2016-06-08 12:51:00 +03:00
dma_free_coherent ( data - > dev , M2701_IOMMU_PGT_SIZE ,
dom - > pgt_va , dom - > pgt_pa ) ;
kfree ( to_mtk_domain ( domain ) ) ;
}
2022-05-03 10:14:19 +03:00
static int mtk_iommu_v1_attach_device ( struct iommu_domain * domain , struct device * dev )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data = dev_iommu_priv_get ( dev ) ;
struct mtk_iommu_v1_domain * dom = to_mtk_domain ( domain ) ;
2020-05-15 11:08:43 +03:00
struct dma_iommu_mapping * mtk_mapping ;
2016-06-08 12:51:00 +03:00
int ret ;
2020-05-15 11:08:43 +03:00
/* Only allow the domain created internally. */
2020-06-25 16:08:31 +03:00
mtk_mapping = data - > mapping ;
2020-05-15 11:08:43 +03:00
if ( mtk_mapping - > domain ! = domain )
return 0 ;
2016-06-08 12:51:00 +03:00
if ( ! data - > m4u_dom ) {
data - > m4u_dom = dom ;
2022-05-03 10:14:19 +03:00
ret = mtk_iommu_v1_domain_finalise ( data ) ;
2016-06-08 12:51:00 +03:00
if ( ret ) {
data - > m4u_dom = NULL ;
return ret ;
}
}
2022-05-03 10:14:19 +03:00
mtk_iommu_v1_config ( data , dev , true ) ;
2016-06-08 12:51:00 +03:00
return 0 ;
}
2022-05-03 10:14:19 +03:00
static void mtk_iommu_v1_detach_device ( struct iommu_domain * domain , struct device * dev )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data = dev_iommu_priv_get ( dev ) ;
2016-06-08 12:51:00 +03:00
2022-05-03 10:14:19 +03:00
mtk_iommu_v1_config ( data , dev , false ) ;
2016-06-08 12:51:00 +03:00
}
2022-05-03 10:14:19 +03:00
static int mtk_iommu_v1_map ( struct iommu_domain * domain , unsigned long iova ,
phys_addr_t paddr , size_t size , int prot , gfp_t gfp )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_domain * dom = to_mtk_domain ( domain ) ;
2016-06-08 12:51:00 +03:00
unsigned int page_num = size > > MT2701_IOMMU_PAGE_SHIFT ;
unsigned long flags ;
unsigned int i ;
u32 * pgt_base_iova = dom - > pgt_va + ( iova > > MT2701_IOMMU_PAGE_SHIFT ) ;
u32 pabase = ( u32 ) paddr ;
int map_size = 0 ;
spin_lock_irqsave ( & dom - > pgtlock , flags ) ;
for ( i = 0 ; i < page_num ; i + + ) {
if ( pgt_base_iova [ i ] ) {
memset ( pgt_base_iova , 0 , i * sizeof ( u32 ) ) ;
break ;
}
pgt_base_iova [ i ] = pabase | F_DESC_VALID | F_DESC_NONSEC ;
pabase + = MT2701_IOMMU_PAGE_SIZE ;
map_size + = MT2701_IOMMU_PAGE_SIZE ;
}
spin_unlock_irqrestore ( & dom - > pgtlock , flags ) ;
2022-05-03 10:14:19 +03:00
mtk_iommu_v1_tlb_flush_range ( dom - > data , iova , size ) ;
2016-06-08 12:51:00 +03:00
return map_size = = size ? 0 : - EEXIST ;
}
2022-05-03 10:14:19 +03:00
static size_t mtk_iommu_v1_unmap ( struct iommu_domain * domain , unsigned long iova ,
size_t size , struct iommu_iotlb_gather * gather )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_domain * dom = to_mtk_domain ( domain ) ;
2016-06-08 12:51:00 +03:00
unsigned long flags ;
u32 * pgt_base_iova = dom - > pgt_va + ( iova > > MT2701_IOMMU_PAGE_SHIFT ) ;
unsigned int page_num = size > > MT2701_IOMMU_PAGE_SHIFT ;
spin_lock_irqsave ( & dom - > pgtlock , flags ) ;
memset ( pgt_base_iova , 0 , page_num * sizeof ( u32 ) ) ;
spin_unlock_irqrestore ( & dom - > pgtlock , flags ) ;
2022-05-03 10:14:19 +03:00
mtk_iommu_v1_tlb_flush_range ( dom - > data , iova , size ) ;
2016-06-08 12:51:00 +03:00
return size ;
}
2022-05-03 10:14:19 +03:00
static phys_addr_t mtk_iommu_v1_iova_to_phys ( struct iommu_domain * domain , dma_addr_t iova )
2016-06-08 12:51:00 +03:00
{
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_domain * dom = to_mtk_domain ( domain ) ;
2016-06-08 12:51:00 +03:00
unsigned long flags ;
phys_addr_t pa ;
spin_lock_irqsave ( & dom - > pgtlock , flags ) ;
pa = * ( dom - > pgt_va + ( iova > > MT2701_IOMMU_PAGE_SHIFT ) ) ;
pa = pa & ( ~ ( MT2701_IOMMU_PAGE_SIZE - 1 ) ) ;
spin_unlock_irqrestore ( & dom - > pgtlock , flags ) ;
return pa ;
}
2022-05-03 10:14:19 +03:00
static const struct iommu_ops mtk_iommu_v1_ops ;
2016-10-17 14:49:21 +03:00
2016-06-08 12:51:00 +03:00
/*
* MTK generation one iommu HW only support one iommu domain , and all the client
* sharing the same iova address space .
*/
2022-05-03 10:14:19 +03:00
static int mtk_iommu_v1_create_mapping ( struct device * dev , struct of_phandle_args * args )
2016-06-08 12:51:00 +03:00
{
2018-11-29 16:01:00 +03:00
struct iommu_fwspec * fwspec = dev_iommu_fwspec_get ( dev ) ;
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data ;
2016-06-08 12:51:00 +03:00
struct platform_device * m4updev ;
struct dma_iommu_mapping * mtk_mapping ;
int ret ;
if ( args - > args_count ! = 1 ) {
dev_err ( dev , " invalid #iommu-cells(%d) property for IOMMU \n " ,
args - > args_count ) ;
return - EINVAL ;
}
2018-11-29 16:01:00 +03:00
if ( ! fwspec ) {
2022-05-03 10:14:19 +03:00
ret = iommu_fwspec_init ( dev , & args - > np - > fwnode , & mtk_iommu_v1_ops ) ;
2016-10-17 14:49:21 +03:00
if ( ret )
return ret ;
2018-11-29 16:01:00 +03:00
fwspec = dev_iommu_fwspec_get ( dev ) ;
2022-05-03 10:14:19 +03:00
} else if ( dev_iommu_fwspec_get ( dev ) - > ops ! = & mtk_iommu_v1_ops ) {
2016-10-17 14:49:21 +03:00
return - EINVAL ;
}
2020-03-26 18:08:38 +03:00
if ( ! dev_iommu_priv_get ( dev ) ) {
2016-06-08 12:51:00 +03:00
/* Get the m4u device */
m4updev = of_find_device_by_node ( args - > np ) ;
if ( WARN_ON ( ! m4updev ) )
return - EINVAL ;
2020-03-26 18:08:38 +03:00
dev_iommu_priv_set ( dev , platform_get_drvdata ( m4updev ) ) ;
2016-06-08 12:51:00 +03:00
}
2016-10-17 14:49:21 +03:00
ret = iommu_fwspec_add_ids ( dev , args - > args , 1 ) ;
if ( ret )
return ret ;
2016-06-08 12:51:00 +03:00
2020-03-26 18:08:38 +03:00
data = dev_iommu_priv_get ( dev ) ;
2020-06-25 16:08:31 +03:00
mtk_mapping = data - > mapping ;
2016-06-08 12:51:00 +03:00
if ( ! mtk_mapping ) {
/* MTK iommu support 4GB iova address space. */
mtk_mapping = arm_iommu_create_mapping ( & platform_bus_type ,
0 , 1ULL < < 32 ) ;
2016-10-17 14:49:21 +03:00
if ( IS_ERR ( mtk_mapping ) )
return PTR_ERR ( mtk_mapping ) ;
2020-06-25 16:08:31 +03:00
data - > mapping = mtk_mapping ;
2016-06-08 12:51:00 +03:00
}
return 0 ;
}
2022-05-03 10:14:19 +03:00
static int mtk_iommu_v1_def_domain_type ( struct device * dev )
2020-05-15 11:08:43 +03:00
{
return IOMMU_DOMAIN_UNMANAGED ;
}
2022-05-03 10:14:19 +03:00
static struct iommu_device * mtk_iommu_v1_probe_device ( struct device * dev )
2016-06-08 12:51:00 +03:00
{
2018-11-29 16:01:00 +03:00
struct iommu_fwspec * fwspec = dev_iommu_fwspec_get ( dev ) ;
2016-06-08 12:51:00 +03:00
struct of_phandle_args iommu_spec ;
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data ;
media: iommu/mediatek: Add device_link between the consumer and the larb devices
MediaTek IOMMU-SMI diagram is like below. all the consumer connect with
smi-larb, then connect with smi-common.
M4U
|
smi-common
|
-------------
| | ...
| |
larb1 larb2
| |
vdec venc
When the consumer works, it should enable the smi-larb's power which
also need enable the smi-common's power firstly.
Thus, First of all, use the device link connect the consumer and the
smi-larbs. then add device link between the smi-larb and smi-common.
This patch adds device_link between the consumer and the larbs.
When device_link_add, I add the flag DL_FLAG_STATELESS to avoid calling
pm_runtime_xx to keep the original status of clocks. It can avoid two
issues:
1) Display HW show fastlogo abnormally reported in [1]. At the beggining,
all the clocks are enabled before entering kernel, but the clocks for
display HW(always in larb0) will be gated after clk_enable and clk_disable
called from device_link_add(->pm_runtime_resume) and rpm_idle. The clock
operation happened before display driver probe. At that time, the display
HW will be abnormal.
2) A deadlock issue reported in [2]. Use DL_FLAG_STATELESS to skip
pm_runtime_xx to avoid the deadlock.
Corresponding, DL_FLAG_AUTOREMOVE_CONSUMER can't be added, then
device_link_removed should be added explicitly.
Meanwhile, Currently we don't have a device connect with 2 larbs at the
same time. Disallow this case, print the error log.
[1] https://lore.kernel.org/linux-mediatek/1564213888.22908.4.camel@mhfsdcap03/
[2] https://lore.kernel.org/patchwork/patch/1086569/
Suggested-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Tested-by: Frank Wunderlich <frank-w@public-files.de> # BPI-R2/MT7623
Acked-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
2022-01-17 10:05:02 +03:00
int err , idx = 0 , larbid , larbidx ;
struct device_link * link ;
struct device * larbdev ;
2016-06-08 12:51:00 +03:00
2022-01-17 10:04:59 +03:00
/*
* In the deferred case , free the existed fwspec .
* Always initialize the fwspec internally .
*/
if ( fwspec ) {
iommu_fwspec_free ( dev ) ;
fwspec = dev_iommu_fwspec_get ( dev ) ;
}
2021-04-12 09:48:42 +03:00
while ( ! of_parse_phandle_with_args ( dev - > of_node , " iommus " ,
" #iommu-cells " ,
idx , & iommu_spec ) ) {
2016-06-08 12:51:00 +03:00
2022-05-03 10:14:19 +03:00
err = mtk_iommu_v1_create_mapping ( dev , & iommu_spec ) ;
2021-04-12 09:48:42 +03:00
of_node_put ( iommu_spec . np ) ;
if ( err )
return ERR_PTR ( err ) ;
2019-01-23 10:46:29 +03:00
/* dev->iommu_fwspec might have changed */
fwspec = dev_iommu_fwspec_get ( dev ) ;
2021-04-12 09:48:42 +03:00
idx + + ;
2016-06-08 12:51:00 +03:00
}
2022-05-03 10:14:19 +03:00
if ( ! fwspec | | fwspec - > ops ! = & mtk_iommu_v1_ops )
2020-04-29 16:37:01 +03:00
return ERR_PTR ( - ENODEV ) ; /* Not a iommu client device */
2016-06-08 12:51:00 +03:00
2020-04-29 16:37:01 +03:00
data = dev_iommu_priv_get ( dev ) ;
2016-06-08 12:51:00 +03:00
media: iommu/mediatek: Add device_link between the consumer and the larb devices
MediaTek IOMMU-SMI diagram is like below. all the consumer connect with
smi-larb, then connect with smi-common.
M4U
|
smi-common
|
-------------
| | ...
| |
larb1 larb2
| |
vdec venc
When the consumer works, it should enable the smi-larb's power which
also need enable the smi-common's power firstly.
Thus, First of all, use the device link connect the consumer and the
smi-larbs. then add device link between the smi-larb and smi-common.
This patch adds device_link between the consumer and the larbs.
When device_link_add, I add the flag DL_FLAG_STATELESS to avoid calling
pm_runtime_xx to keep the original status of clocks. It can avoid two
issues:
1) Display HW show fastlogo abnormally reported in [1]. At the beggining,
all the clocks are enabled before entering kernel, but the clocks for
display HW(always in larb0) will be gated after clk_enable and clk_disable
called from device_link_add(->pm_runtime_resume) and rpm_idle. The clock
operation happened before display driver probe. At that time, the display
HW will be abnormal.
2) A deadlock issue reported in [2]. Use DL_FLAG_STATELESS to skip
pm_runtime_xx to avoid the deadlock.
Corresponding, DL_FLAG_AUTOREMOVE_CONSUMER can't be added, then
device_link_removed should be added explicitly.
Meanwhile, Currently we don't have a device connect with 2 larbs at the
same time. Disallow this case, print the error log.
[1] https://lore.kernel.org/linux-mediatek/1564213888.22908.4.camel@mhfsdcap03/
[2] https://lore.kernel.org/patchwork/patch/1086569/
Suggested-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Tested-by: Frank Wunderlich <frank-w@public-files.de> # BPI-R2/MT7623
Acked-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
2022-01-17 10:05:02 +03:00
/* Link the consumer device with the smi-larb device(supplier) */
larbid = mt2701_m4u_to_larb ( fwspec - > ids [ 0 ] ) ;
2022-05-05 16:27:30 +03:00
if ( larbid > = MT2701_LARB_NR_MAX )
return ERR_PTR ( - EINVAL ) ;
media: iommu/mediatek: Add device_link between the consumer and the larb devices
MediaTek IOMMU-SMI diagram is like below. all the consumer connect with
smi-larb, then connect with smi-common.
M4U
|
smi-common
|
-------------
| | ...
| |
larb1 larb2
| |
vdec venc
When the consumer works, it should enable the smi-larb's power which
also need enable the smi-common's power firstly.
Thus, First of all, use the device link connect the consumer and the
smi-larbs. then add device link between the smi-larb and smi-common.
This patch adds device_link between the consumer and the larbs.
When device_link_add, I add the flag DL_FLAG_STATELESS to avoid calling
pm_runtime_xx to keep the original status of clocks. It can avoid two
issues:
1) Display HW show fastlogo abnormally reported in [1]. At the beggining,
all the clocks are enabled before entering kernel, but the clocks for
display HW(always in larb0) will be gated after clk_enable and clk_disable
called from device_link_add(->pm_runtime_resume) and rpm_idle. The clock
operation happened before display driver probe. At that time, the display
HW will be abnormal.
2) A deadlock issue reported in [2]. Use DL_FLAG_STATELESS to skip
pm_runtime_xx to avoid the deadlock.
Corresponding, DL_FLAG_AUTOREMOVE_CONSUMER can't be added, then
device_link_removed should be added explicitly.
Meanwhile, Currently we don't have a device connect with 2 larbs at the
same time. Disallow this case, print the error log.
[1] https://lore.kernel.org/linux-mediatek/1564213888.22908.4.camel@mhfsdcap03/
[2] https://lore.kernel.org/patchwork/patch/1086569/
Suggested-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Tested-by: Frank Wunderlich <frank-w@public-files.de> # BPI-R2/MT7623
Acked-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
2022-01-17 10:05:02 +03:00
for ( idx = 1 ; idx < fwspec - > num_ids ; idx + + ) {
larbidx = mt2701_m4u_to_larb ( fwspec - > ids [ idx ] ) ;
if ( larbid ! = larbidx ) {
dev_err ( dev , " Can only use one larb. Fail@larb%d-%d. \n " ,
larbid , larbidx ) ;
return ERR_PTR ( - EINVAL ) ;
}
}
larbdev = data - > larb_imu [ larbid ] . dev ;
2022-05-05 16:27:30 +03:00
if ( ! larbdev )
return ERR_PTR ( - EINVAL ) ;
media: iommu/mediatek: Add device_link between the consumer and the larb devices
MediaTek IOMMU-SMI diagram is like below. all the consumer connect with
smi-larb, then connect with smi-common.
M4U
|
smi-common
|
-------------
| | ...
| |
larb1 larb2
| |
vdec venc
When the consumer works, it should enable the smi-larb's power which
also need enable the smi-common's power firstly.
Thus, First of all, use the device link connect the consumer and the
smi-larbs. then add device link between the smi-larb and smi-common.
This patch adds device_link between the consumer and the larbs.
When device_link_add, I add the flag DL_FLAG_STATELESS to avoid calling
pm_runtime_xx to keep the original status of clocks. It can avoid two
issues:
1) Display HW show fastlogo abnormally reported in [1]. At the beggining,
all the clocks are enabled before entering kernel, but the clocks for
display HW(always in larb0) will be gated after clk_enable and clk_disable
called from device_link_add(->pm_runtime_resume) and rpm_idle. The clock
operation happened before display driver probe. At that time, the display
HW will be abnormal.
2) A deadlock issue reported in [2]. Use DL_FLAG_STATELESS to skip
pm_runtime_xx to avoid the deadlock.
Corresponding, DL_FLAG_AUTOREMOVE_CONSUMER can't be added, then
device_link_removed should be added explicitly.
Meanwhile, Currently we don't have a device connect with 2 larbs at the
same time. Disallow this case, print the error log.
[1] https://lore.kernel.org/linux-mediatek/1564213888.22908.4.camel@mhfsdcap03/
[2] https://lore.kernel.org/patchwork/patch/1086569/
Suggested-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Tested-by: Frank Wunderlich <frank-w@public-files.de> # BPI-R2/MT7623
Acked-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
2022-01-17 10:05:02 +03:00
link = device_link_add ( dev , larbdev ,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS ) ;
if ( ! link )
dev_err ( dev , " Unable to link %s \n " , dev_name ( larbdev ) ) ;
2020-04-29 16:37:01 +03:00
return & data - > iommu ;
}
2018-01-26 10:11:28 +03:00
2022-05-03 10:14:19 +03:00
static void mtk_iommu_v1_probe_finalize ( struct device * dev )
2020-04-29 16:37:01 +03:00
{
struct dma_iommu_mapping * mtk_mapping ;
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data ;
2020-04-29 16:37:01 +03:00
int err ;
data = dev_iommu_priv_get ( dev ) ;
2020-06-25 16:08:31 +03:00
mtk_mapping = data - > mapping ;
2018-01-26 10:11:28 +03:00
2020-04-29 16:37:01 +03:00
err = arm_iommu_attach_device ( dev , mtk_mapping ) ;
if ( err )
dev_err ( dev , " Can't create IOMMU mapping - DMA-OPS will not work \n " ) ;
2016-06-08 12:51:00 +03:00
}
2022-05-03 10:14:19 +03:00
static void mtk_iommu_v1_release_device ( struct device * dev )
2016-06-08 12:51:00 +03:00
{
2018-11-29 16:01:00 +03:00
struct iommu_fwspec * fwspec = dev_iommu_fwspec_get ( dev ) ;
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data ;
media: iommu/mediatek: Add device_link between the consumer and the larb devices
MediaTek IOMMU-SMI diagram is like below. all the consumer connect with
smi-larb, then connect with smi-common.
M4U
|
smi-common
|
-------------
| | ...
| |
larb1 larb2
| |
vdec venc
When the consumer works, it should enable the smi-larb's power which
also need enable the smi-common's power firstly.
Thus, First of all, use the device link connect the consumer and the
smi-larbs. then add device link between the smi-larb and smi-common.
This patch adds device_link between the consumer and the larbs.
When device_link_add, I add the flag DL_FLAG_STATELESS to avoid calling
pm_runtime_xx to keep the original status of clocks. It can avoid two
issues:
1) Display HW show fastlogo abnormally reported in [1]. At the beggining,
all the clocks are enabled before entering kernel, but the clocks for
display HW(always in larb0) will be gated after clk_enable and clk_disable
called from device_link_add(->pm_runtime_resume) and rpm_idle. The clock
operation happened before display driver probe. At that time, the display
HW will be abnormal.
2) A deadlock issue reported in [2]. Use DL_FLAG_STATELESS to skip
pm_runtime_xx to avoid the deadlock.
Corresponding, DL_FLAG_AUTOREMOVE_CONSUMER can't be added, then
device_link_removed should be added explicitly.
Meanwhile, Currently we don't have a device connect with 2 larbs at the
same time. Disallow this case, print the error log.
[1] https://lore.kernel.org/linux-mediatek/1564213888.22908.4.camel@mhfsdcap03/
[2] https://lore.kernel.org/patchwork/patch/1086569/
Suggested-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Tested-by: Frank Wunderlich <frank-w@public-files.de> # BPI-R2/MT7623
Acked-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
2022-01-17 10:05:02 +03:00
struct device * larbdev ;
unsigned int larbid ;
2017-03-31 16:12:31 +03:00
media: iommu/mediatek: Add device_link between the consumer and the larb devices
MediaTek IOMMU-SMI diagram is like below. all the consumer connect with
smi-larb, then connect with smi-common.
M4U
|
smi-common
|
-------------
| | ...
| |
larb1 larb2
| |
vdec venc
When the consumer works, it should enable the smi-larb's power which
also need enable the smi-common's power firstly.
Thus, First of all, use the device link connect the consumer and the
smi-larbs. then add device link between the smi-larb and smi-common.
This patch adds device_link between the consumer and the larbs.
When device_link_add, I add the flag DL_FLAG_STATELESS to avoid calling
pm_runtime_xx to keep the original status of clocks. It can avoid two
issues:
1) Display HW show fastlogo abnormally reported in [1]. At the beggining,
all the clocks are enabled before entering kernel, but the clocks for
display HW(always in larb0) will be gated after clk_enable and clk_disable
called from device_link_add(->pm_runtime_resume) and rpm_idle. The clock
operation happened before display driver probe. At that time, the display
HW will be abnormal.
2) A deadlock issue reported in [2]. Use DL_FLAG_STATELESS to skip
pm_runtime_xx to avoid the deadlock.
Corresponding, DL_FLAG_AUTOREMOVE_CONSUMER can't be added, then
device_link_removed should be added explicitly.
Meanwhile, Currently we don't have a device connect with 2 larbs at the
same time. Disallow this case, print the error log.
[1] https://lore.kernel.org/linux-mediatek/1564213888.22908.4.camel@mhfsdcap03/
[2] https://lore.kernel.org/patchwork/patch/1086569/
Suggested-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Tested-by: Frank Wunderlich <frank-w@public-files.de> # BPI-R2/MT7623
Acked-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
2022-01-17 10:05:02 +03:00
data = dev_iommu_priv_get ( dev ) ;
larbid = mt2701_m4u_to_larb ( fwspec - > ids [ 0 ] ) ;
larbdev = data - > larb_imu [ larbid ] . dev ;
device_link_remove ( dev , larbdev ) ;
2016-06-08 12:51:00 +03:00
}
2022-05-03 10:14:19 +03:00
/*
 * mtk_iommu_v1_hw_init - one-time hardware setup for the M4U
 * @data: driver state holding the MMIO base, IRQ number and bus clock
 *
 * Enables the bus clock, programs coherency/translation-fault control,
 * unmasks all fault interrupt sources, sets the protect address that the
 * HW writes on a translation fault, and installs the fault ISR.
 *
 * Return: 0 on success, negative errno on failure.  On the IRQ-request
 * failure path the page-table base is cleared and the clock is released
 * so the hardware is left in a clean state.
 */
static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	/* Coherent accesses; protect-select 2 routes faults to protect_base. */
	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	/* Unmask every fault source the hardware can report. */
	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/* protect memory, hw will write here while translation fault */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
			dev_name(data->dev), (void *)data)) {
		/* Undo the HW setup so a later retry starts cleanly. */
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}
2022-05-03 10:14:19 +03:00
static const struct iommu_ops mtk_iommu_v1_ops = {
. domain_alloc = mtk_iommu_v1_domain_alloc ,
. probe_device = mtk_iommu_v1_probe_device ,
. probe_finalize = mtk_iommu_v1_probe_finalize ,
. release_device = mtk_iommu_v1_release_device ,
. def_domain_type = mtk_iommu_v1_def_domain_type ,
2020-04-29 16:37:01 +03:00
. device_group = generic_device_group ,
2016-06-08 12:51:00 +03:00
. pgsize_bitmap = ~ 0UL < < MT2701_IOMMU_PAGE_SHIFT ,
2021-03-26 06:23:36 +03:00
. owner = THIS_MODULE ,
2022-02-16 05:52:49 +03:00
. default_domain_ops = & ( const struct iommu_domain_ops ) {
2022-05-03 10:14:19 +03:00
. attach_dev = mtk_iommu_v1_attach_device ,
. detach_dev = mtk_iommu_v1_detach_device ,
. map = mtk_iommu_v1_map ,
. unmap = mtk_iommu_v1_unmap ,
. iova_to_phys = mtk_iommu_v1_iova_to_phys ,
. free = mtk_iommu_v1_domain_free ,
2022-02-16 05:52:49 +03:00
}
2016-06-08 12:51:00 +03:00
} ;
2022-05-03 10:14:19 +03:00
/* Devicetree match table; the MT2701 M4U is the only v1 user. */
static const struct of_device_id mtk_iommu_v1_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};
2022-05-03 10:14:19 +03:00
/* Component-master callbacks invoked once all larb components are ready. */
static const struct component_master_ops mtk_iommu_v1_com_ops = {
	.bind	= mtk_iommu_v1_bind,
	.unbind	= mtk_iommu_v1_unbind,
};
2022-05-03 10:14:19 +03:00
static int mtk_iommu_v1_probe ( struct platform_device * pdev )
2016-06-08 12:51:00 +03:00
{
struct device * dev = & pdev - > dev ;
2022-05-03 10:14:19 +03:00
struct mtk_iommu_v1_data * data ;
2016-06-08 12:51:00 +03:00
struct resource * res ;
struct component_match * match = NULL ;
void * protect ;
2021-04-12 09:48:42 +03:00
int larb_nr , ret , i ;
2016-06-08 12:51:00 +03:00
data = devm_kzalloc ( dev , sizeof ( * data ) , GFP_KERNEL ) ;
if ( ! data )
return - ENOMEM ;
data - > dev = dev ;
/* Protect memory. HW will access here while translation fault.*/
protect = devm_kzalloc ( dev , MTK_PROTECT_PA_ALIGN * 2 ,
GFP_KERNEL | GFP_DMA ) ;
if ( ! protect )
return - ENOMEM ;
data - > protect_base = ALIGN ( virt_to_phys ( protect ) , MTK_PROTECT_PA_ALIGN ) ;
res = platform_get_resource ( pdev , IORESOURCE_MEM , 0 ) ;
data - > base = devm_ioremap_resource ( dev , res ) ;
if ( IS_ERR ( data - > base ) )
return PTR_ERR ( data - > base ) ;
data - > irq = platform_get_irq ( pdev , 0 ) ;
if ( data - > irq < 0 )
return data - > irq ;
data - > bclk = devm_clk_get ( dev , " bclk " ) ;
if ( IS_ERR ( data - > bclk ) )
return PTR_ERR ( data - > bclk ) ;
2021-04-12 09:48:42 +03:00
larb_nr = of_count_phandle_with_args ( dev - > of_node ,
" mediatek,larbs " , NULL ) ;
if ( larb_nr < 0 )
return larb_nr ;
for ( i = 0 ; i < larb_nr ; i + + ) {
struct device_node * larbnode ;
2016-06-08 12:51:00 +03:00
struct platform_device * plarbdev ;
2021-04-12 09:48:42 +03:00
larbnode = of_parse_phandle ( dev - > of_node , " mediatek,larbs " , i ) ;
if ( ! larbnode )
return - EINVAL ;
2016-06-08 12:51:00 +03:00
2021-04-12 09:48:42 +03:00
if ( ! of_device_is_available ( larbnode ) ) {
of_node_put ( larbnode ) ;
2016-06-08 12:51:00 +03:00
continue ;
2021-04-12 09:48:42 +03:00
}
2016-06-08 12:51:00 +03:00
2021-04-12 09:48:42 +03:00
plarbdev = of_find_device_by_node ( larbnode ) ;
2016-06-08 12:51:00 +03:00
if ( ! plarbdev ) {
2021-04-12 09:48:42 +03:00
of_node_put ( larbnode ) ;
2022-01-17 10:05:00 +03:00
return - ENODEV ;
2016-06-08 12:51:00 +03:00
}
2022-01-17 10:05:01 +03:00
if ( ! plarbdev - > dev . driver ) {
of_node_put ( larbnode ) ;
return - EPROBE_DEFER ;
}
2021-04-12 09:48:42 +03:00
data - > larb_imu [ i ] . dev = & plarbdev - > dev ;
2016-06-08 12:51:00 +03:00
2022-02-14 09:08:15 +03:00
component_match_add_release ( dev , & match , component_release_of ,
component_compare_of , larbnode ) ;
2016-06-08 12:51:00 +03:00
}
platform_set_drvdata ( pdev , data ) ;
2022-05-03 10:14:19 +03:00
ret = mtk_iommu_v1_hw_init ( data ) ;
2016-06-08 12:51:00 +03:00
if ( ret )
return ret ;
2017-03-31 16:12:31 +03:00
ret = iommu_device_sysfs_add ( & data - > iommu , & pdev - > dev , NULL ,
dev_name ( & pdev - > dev ) ) ;
if ( ret )
return ret ;
2022-05-03 10:14:19 +03:00
ret = iommu_device_register ( & data - > iommu , & mtk_iommu_v1_ops , dev ) ;
2017-03-31 16:12:31 +03:00
if ( ret )
2021-04-12 09:48:43 +03:00
goto out_sysfs_remove ;
2017-03-31 16:12:31 +03:00
2022-05-03 10:14:19 +03:00
ret = component_master_add_with_match ( dev , & mtk_iommu_v1_com_ops , match ) ;
2021-04-12 09:48:43 +03:00
if ( ret )
2022-08-15 19:20:13 +03:00
goto out_dev_unreg ;
2021-04-12 09:48:43 +03:00
return ret ;
out_dev_unreg :
iommu_device_unregister ( & data - > iommu ) ;
out_sysfs_remove :
iommu_device_sysfs_remove ( & data - > iommu ) ;
return ret ;
2016-06-08 12:51:00 +03:00
}
2022-05-03 10:14:19 +03:00
/*
 * mtk_iommu_v1_remove - tear down the M4U on driver unbind
 *
 * Unwinds probe: unregisters from the IOMMU core, releases the bus
 * clock, explicitly frees the fault IRQ (rather than leaving it to
 * devres) and removes the component master.
 */
static int mtk_iommu_v1_remove(struct platform_device *pdev)
{
	struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
	return 0;
}
2022-05-03 10:14:19 +03:00
/*
 * mtk_iommu_v1_suspend - save MMU registers into data->reg so that
 * mtk_iommu_v1_resume() can write them back after a system sleep.
 */
static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
	return 0;
}
2022-05-03 10:14:19 +03:00
/*
 * mtk_iommu_v1_resume - restore the registers saved by
 * mtk_iommu_v1_suspend(), plus the page-table base and the
 * translation-fault protect address kept in @data.
 *
 * NOTE(review): dereferences data->m4u_dom unconditionally — this
 * assumes a domain was attached before suspend; verify it cannot be
 * NULL on this path.
 */
static int __maybe_unused mtk_iommu_v1_resume(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
	writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
	return 0;
}
2022-05-03 10:14:19 +03:00
/* System-sleep hooks: save the MMU state on suspend, rewrite it on resume. */
static const struct dev_pm_ops mtk_iommu_v1_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume)
};
2022-05-03 10:14:19 +03:00
static struct platform_driver mtk_iommu_v1_driver = {
. probe = mtk_iommu_v1_probe ,
. remove = mtk_iommu_v1_remove ,
2016-06-08 12:51:00 +03:00
. driver = {
2017-10-30 14:37:55 +03:00
. name = " mtk-iommu-v1 " ,
2022-05-03 10:14:19 +03:00
. of_match_table = mtk_iommu_v1_of_ids ,
. pm = & mtk_iommu_v1_pm_ops ,
2016-06-08 12:51:00 +03:00
}
} ;
2022-05-03 10:14:19 +03:00
/* Standard module registration boilerplate for the platform driver. */
module_platform_driver(mtk_iommu_v1_driver);

MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
MODULE_LICENSE("GPL v2");