/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
*/

#ifndef _MTK_IOMMU_H_
#define _MTK_IOMMU_H_

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <soc/mediatek/smi.h>
#include <dt-bindings/memory/mtk-memory-port.h>

#define MTK_LARB_COM_MAX	8
#define MTK_LARB_SUBCOM_MAX	4

#define MTK_IOMMU_GROUP_MAX	8
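
/* Register values saved across suspend and restored on resume. */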
struct mtk_iommu_suspend_reg {
	union {
		u32 standard_axi_mode;	/* v1 */
		u32 misc_ctrl;		/* v2 */
	};
	u32 dcm_dis;
	u32 ctrl_reg;
	u32 int_control0;
	u32 int_main_control;
	u32 ivrp_paddr;
	u32 vld_pa_rng;
	u32 wr_len_ctrl;
};
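
/* Identifies the MediaTek SoC generation of the M4U (IOMMU) hardware. */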
enum mtk_iommu_plat {
	M4U_MT2701,
	M4U_MT2712,
	M4U_MT6779,
	M4U_MT8167,
	M4U_MT8173,
	M4U_MT8183,
	M4U_MT8192,
};

struct mtk_iommu_iova_region;
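
/*
 * Per-SoC data: feature flags, the TLB invalidate register offset, the
 * supported IOVA regions and the larb ID remapping table.
 */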
struct mtk_iommu_plat_data {
	enum mtk_iommu_plat	m4u_plat;
	u32			flags;
	u32			inv_sel_reg;

	unsigned int		iova_region_nr;
	const struct mtk_iommu_iova_region *iova_region;

	unsigned char		larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
};

struct mtk_iommu_domain;
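
/* Driver-private data for one M4U (IOMMU) instance. */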
struct mtk_iommu_data {
	void __iomem		*base;
	int			irq;
	struct device		*dev;
	struct clk		*bclk;
	phys_addr_t		protect_base; /* protect memory base */
	struct mtk_iommu_suspend_reg	reg;
	struct mtk_iommu_domain	*m4u_dom;
	struct iommu_group	*m4u_group[MTK_IOMMU_GROUP_MAX];
	bool			enable_4GB;
	spinlock_t		tlb_lock; /* lock for tlb range flush */

	struct iommu_device	iommu;
	const struct mtk_iommu_plat_data *plat_data;
	struct device		*smicomm_dev;

	struct dma_iommu_mapping	*mapping; /* For mtk_iommu_v1.c */

	struct list_head	list;
	struct mtk_smi_larb_iommu	larb_imu[MTK_LARB_NR_MAX];
};
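
/*
 * Inline helpers for the component framework: the IOMMU is the aggregate
 * device and the SMI larbs are bound to it as components.
 */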
static inline int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static inline void release_of(struct device *dev, void *data)
{
	of_node_put(data);
}

static inline int mtk_iommu_bind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->larb_imu);
}

static inline void mtk_iommu_unbind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);

	component_unbind_all(dev, &data->larb_imu);
}
#endif