iommu/mediatek-v1: Just rename mtk_iommu to mtk_iommu_v1

No functional change. Just rename this for readability, and to differentiate
it from mtk_iommu.c.

Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Reviewed-by: Matthias Brugger <matthias.bgg@gmail.com>
Link: https://lore.kernel.org/r/20220503071427.2285-29-yong.wu@mediatek.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Yong Wu, 2022-05-03 15:14:19 +08:00 (committed by Joerg Roedel)
Parent: 6a513de3ef
Commit: ad9b10e533


@@ -85,53 +85,53 @@
  */
 #define M2701_IOMMU_PGT_SIZE SZ_4M
 
-struct mtk_iommu_suspend_reg {
+struct mtk_iommu_v1_suspend_reg {
	 u32 standard_axi_mode;
	 u32 dcm_dis;
	 u32 ctrl_reg;
	 u32 int_control0;
 };
 
-struct mtk_iommu_data {
+struct mtk_iommu_v1_data {
	 void __iomem *base;
	 int irq;
	 struct device *dev;
	 struct clk *bclk;
	 phys_addr_t protect_base; /* protect memory base */
-	struct mtk_iommu_domain *m4u_dom;
+	struct mtk_iommu_v1_domain *m4u_dom;
 
	 struct iommu_device iommu;
	 struct dma_iommu_mapping *mapping;
	 struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
 
-	struct mtk_iommu_suspend_reg reg;
+	struct mtk_iommu_v1_suspend_reg reg;
 };
 
-struct mtk_iommu_domain {
+struct mtk_iommu_v1_domain {
	 spinlock_t pgtlock; /* lock for page table */
	 struct iommu_domain domain;
	 u32 *pgt_va;
	 dma_addr_t pgt_pa;
-	struct mtk_iommu_data *data;
+	struct mtk_iommu_v1_data *data;
 };
 
-static int mtk_iommu_bind(struct device *dev)
+static int mtk_iommu_v1_bind(struct device *dev)
 {
-	struct mtk_iommu_data *data = dev_get_drvdata(dev);
+	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
 
	 return component_bind_all(dev, &data->larb_imu);
 }
 
-static void mtk_iommu_unbind(struct device *dev)
+static void mtk_iommu_v1_unbind(struct device *dev)
 {
-	struct mtk_iommu_data *data = dev_get_drvdata(dev);
+	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
 
	 component_unbind_all(dev, &data->larb_imu);
 }
 
-static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
+static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom)
 {
-	return container_of(dom, struct mtk_iommu_domain, domain);
+	return container_of(dom, struct mtk_iommu_v1_domain, domain);
 }
 
 static const int mt2701_m4u_in_larb[] = {
@@ -157,7 +157,7 @@ static inline int mt2701_m4u_to_port(int id)
	 return id - mt2701_m4u_in_larb[larb];
 }
 
-static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
+static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data)
 {
	 writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
@@ -165,8 +165,8 @@ static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
	 wmb(); /* Make sure the tlb flush all done */
 }
 
-static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
+static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data,
					unsigned long iova, size_t size)
 {
	 int ret;
	 u32 tmp;
@@ -184,16 +184,16 @@ static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
	 if (ret) {
		 dev_warn(data->dev,
			  "Partial TLB flush timed out, falling back to full flush\n");
-		mtk_iommu_tlb_flush_all(data);
+		mtk_iommu_v1_tlb_flush_all(data);
	 }
	 /* Clear the CPE status */
	 writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
 }
 
-static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id)
 {
-	struct mtk_iommu_data *data = dev_id;
-	struct mtk_iommu_domain *dom = data->m4u_dom;
+	struct mtk_iommu_v1_data *data = dev_id;
+	struct mtk_iommu_v1_domain *dom = data->m4u_dom;
	 u32 int_state, regval, fault_iova, fault_pa;
	 unsigned int fault_larb, fault_port;
@@ -223,13 +223,13 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
	 regval |= F_INT_CLR_BIT;
	 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);
 
-	mtk_iommu_tlb_flush_all(data);
+	mtk_iommu_v1_tlb_flush_all(data);
 
	 return IRQ_HANDLED;
 }
 
-static void mtk_iommu_config(struct mtk_iommu_data *data,
+static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
			      struct device *dev, bool enable)
 {
	 struct mtk_smi_larb_iommu *larb_mmu;
	 unsigned int larbid, portid;
@@ -251,9 +251,9 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
	 }
 }
 
-static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
 {
-	struct mtk_iommu_domain *dom = data->m4u_dom;
+	struct mtk_iommu_v1_domain *dom = data->m4u_dom;
 
	 spin_lock_init(&dom->pgtlock);
@@ -269,9 +269,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
	 return 0;
 }
 
-static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
 {
-	struct mtk_iommu_domain *dom;
+	struct mtk_iommu_v1_domain *dom;
 
	 if (type != IOMMU_DOMAIN_UNMANAGED)
		 return NULL;
@@ -283,21 +283,20 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
	 return &dom->domain;
 }
 
-static void mtk_iommu_domain_free(struct iommu_domain *domain)
+static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
 {
-	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	struct mtk_iommu_data *data = dom->data;
+	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
+	struct mtk_iommu_v1_data *data = dom->data;
 
	 dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	 kfree(to_mtk_domain(domain));
 }
 
-static int mtk_iommu_attach_device(struct iommu_domain *domain,
-				   struct device *dev)
+static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
 {
-	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
-	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
+	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	 struct dma_iommu_mapping *mtk_mapping;
	 int ret;
@@ -308,29 +307,28 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
	 if (!data->m4u_dom) {
		 data->m4u_dom = dom;
-		ret = mtk_iommu_domain_finalise(data);
+		ret = mtk_iommu_v1_domain_finalise(data);
		 if (ret) {
			 data->m4u_dom = NULL;
			 return ret;
		 }
	 }
 
-	mtk_iommu_config(data, dev, true);
+	mtk_iommu_v1_config(data, dev, true);
	 return 0;
 }
 
-static void mtk_iommu_detach_device(struct iommu_domain *domain,
-				    struct device *dev)
+static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct device *dev)
 {
-	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
 
-	mtk_iommu_config(data, dev, false);
+	mtk_iommu_v1_config(data, dev, false);
 }
 
-static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
+static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
-	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	 unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
	 unsigned long flags;
	 unsigned int i;
@@ -351,16 +349,15 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
	 spin_unlock_irqrestore(&dom->pgtlock, flags);
 
-	mtk_iommu_tlb_flush_range(dom->data, iova, size);
+	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
 
	 return map_size == size ? 0 : -EEXIST;
 }
 
-static size_t mtk_iommu_unmap(struct iommu_domain *domain,
-			      unsigned long iova, size_t size,
-			      struct iommu_iotlb_gather *gather)
+static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
+				 size_t size, struct iommu_iotlb_gather *gather)
 {
-	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	 unsigned long flags;
	 u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	 unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
@@ -369,15 +366,14 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
	 memset(pgt_base_iova, 0, page_num * sizeof(u32));
	 spin_unlock_irqrestore(&dom->pgtlock, flags);
 
-	mtk_iommu_tlb_flush_range(dom->data, iova, size);
+	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
 
	 return size;
 }
 
-static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
-					  dma_addr_t iova)
+static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 {
-	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	 unsigned long flags;
	 phys_addr_t pa;
@@ -389,17 +385,16 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
	 return pa;
 }
 
-static const struct iommu_ops mtk_iommu_ops;
+static const struct iommu_ops mtk_iommu_v1_ops;
 
 /*
  * MTK generation one iommu HW only support one iommu domain, and all the client
  * sharing the same iova address space.
  */
-static int mtk_iommu_create_mapping(struct device *dev,
-				    struct of_phandle_args *args)
+static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
 {
	 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct mtk_iommu_data *data;
+	struct mtk_iommu_v1_data *data;
	 struct platform_device *m4updev;
	 struct dma_iommu_mapping *mtk_mapping;
	 int ret;
@@ -411,11 +406,11 @@ static int mtk_iommu_create_mapping(struct device *dev,
	 }
 
	 if (!fwspec) {
-		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
+		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops);
		 if (ret)
			 return ret;
		 fwspec = dev_iommu_fwspec_get(dev);
-	} else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_ops) {
+	} else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_v1_ops) {
		 return -EINVAL;
	 }
@@ -447,16 +442,16 @@ static int mtk_iommu_create_mapping(struct device *dev,
	 return 0;
 }
 
-static int mtk_iommu_def_domain_type(struct device *dev)
+static int mtk_iommu_v1_def_domain_type(struct device *dev)
 {
	 return IOMMU_DOMAIN_UNMANAGED;
 }
 
-static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
+static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
 {
	 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	 struct of_phandle_args iommu_spec;
-	struct mtk_iommu_data *data;
+	struct mtk_iommu_v1_data *data;
	 int err, idx = 0, larbid, larbidx;
	 struct device_link *link;
	 struct device *larbdev;
@@ -474,7 +469,7 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
					    "#iommu-cells",
					    idx, &iommu_spec)) {
 
-		err = mtk_iommu_create_mapping(dev, &iommu_spec);
+		err = mtk_iommu_v1_create_mapping(dev, &iommu_spec);
		 of_node_put(iommu_spec.np);
		 if (err)
			 return ERR_PTR(err);
@@ -484,7 +479,7 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
		 idx++;
	 }
 
-	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
+	if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
		 return ERR_PTR(-ENODEV); /* Not a iommu client device */
 
	 data = dev_iommu_priv_get(dev);
@@ -509,10 +504,10 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
	 return &data->iommu;
 }
 
-static void mtk_iommu_probe_finalize(struct device *dev)
+static void mtk_iommu_v1_probe_finalize(struct device *dev)
 {
	 struct dma_iommu_mapping *mtk_mapping;
-	struct mtk_iommu_data *data;
+	struct mtk_iommu_v1_data *data;
	 int err;
 
	 data = dev_iommu_priv_get(dev);
@@ -523,14 +518,14 @@ static void mtk_iommu_probe_finalize(struct device *dev)
		 dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
 }
 
-static void mtk_iommu_release_device(struct device *dev)
+static void mtk_iommu_v1_release_device(struct device *dev)
 {
	 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct mtk_iommu_data *data;
+	struct mtk_iommu_v1_data *data;
	 struct device *larbdev;
	 unsigned int larbid;
 
-	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
+	if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
		 return;
 
	 data = dev_iommu_priv_get(dev);
@@ -541,7 +536,7 @@ static void mtk_iommu_release_device(struct device *dev)
	 iommu_fwspec_free(dev);
 }
 
-static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
 {
	 u32 regval;
	 int ret;
@@ -571,7 +566,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
	 writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);
 
-	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
+	if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		 clk_disable_unprepare(data->bclk);
@@ -582,39 +577,39 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
	 return 0;
 }
 
-static const struct iommu_ops mtk_iommu_ops = {
-	.domain_alloc = mtk_iommu_domain_alloc,
-	.probe_device = mtk_iommu_probe_device,
-	.probe_finalize = mtk_iommu_probe_finalize,
-	.release_device = mtk_iommu_release_device,
-	.def_domain_type = mtk_iommu_def_domain_type,
+static const struct iommu_ops mtk_iommu_v1_ops = {
+	.domain_alloc = mtk_iommu_v1_domain_alloc,
+	.probe_device = mtk_iommu_v1_probe_device,
+	.probe_finalize = mtk_iommu_v1_probe_finalize,
+	.release_device = mtk_iommu_v1_release_device,
+	.def_domain_type = mtk_iommu_v1_def_domain_type,
	 .device_group = generic_device_group,
	 .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
	 .owner = THIS_MODULE,
	 .default_domain_ops = &(const struct iommu_domain_ops) {
-		.attach_dev = mtk_iommu_attach_device,
-		.detach_dev = mtk_iommu_detach_device,
-		.map = mtk_iommu_map,
-		.unmap = mtk_iommu_unmap,
-		.iova_to_phys = mtk_iommu_iova_to_phys,
-		.free = mtk_iommu_domain_free,
+		.attach_dev = mtk_iommu_v1_attach_device,
+		.detach_dev = mtk_iommu_v1_detach_device,
+		.map = mtk_iommu_v1_map,
+		.unmap = mtk_iommu_v1_unmap,
+		.iova_to_phys = mtk_iommu_v1_iova_to_phys,
+		.free = mtk_iommu_v1_domain_free,
	 }
 };
 
-static const struct of_device_id mtk_iommu_of_ids[] = {
+static const struct of_device_id mtk_iommu_v1_of_ids[] = {
	 { .compatible = "mediatek,mt2701-m4u", },
	 {}
 };
 
-static const struct component_master_ops mtk_iommu_com_ops = {
-	.bind = mtk_iommu_bind,
-	.unbind = mtk_iommu_unbind,
+static const struct component_master_ops mtk_iommu_v1_com_ops = {
+	.bind = mtk_iommu_v1_bind,
+	.unbind = mtk_iommu_v1_unbind,
 };
 
-static int mtk_iommu_probe(struct platform_device *pdev)
+static int mtk_iommu_v1_probe(struct platform_device *pdev)
 {
-	struct mtk_iommu_data *data;
	 struct device *dev = &pdev->dev;
+	struct mtk_iommu_v1_data *data;
	 struct resource *res;
	 struct component_match *match = NULL;
	 void *protect;
@@ -681,7 +676,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
	 platform_set_drvdata(pdev, data);
 
-	ret = mtk_iommu_hw_init(data);
+	ret = mtk_iommu_v1_hw_init(data);
	 if (ret)
		 return ret;
@@ -690,17 +685,17 @@ static int mtk_iommu_probe(struct platform_device *pdev)
	 if (ret)
		 return ret;
 
-	ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
+	ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
	 if (ret)
		 goto out_sysfs_remove;
 
	 if (!iommu_present(&platform_bus_type)) {
-		ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+		ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_v1_ops);
		 if (ret)
			 goto out_dev_unreg;
	 }
 
-	ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+	ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
	 if (ret)
		 goto out_bus_set_null;
	 return ret;
@@ -714,9 +709,9 @@ out_sysfs_remove:
	 return ret;
 }
 
-static int mtk_iommu_remove(struct platform_device *pdev)
+static int mtk_iommu_v1_remove(struct platform_device *pdev)
 {
-	struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+	struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);
 
	 iommu_device_sysfs_remove(&data->iommu);
	 iommu_device_unregister(&data->iommu);
@@ -726,14 +721,14 @@ static int mtk_iommu_remove(struct platform_device *pdev)
	 clk_disable_unprepare(data->bclk);
	 devm_free_irq(&pdev->dev, data->irq, data);
-	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+	component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
	 return 0;
 }
 
-static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
 {
-	struct mtk_iommu_data *data = dev_get_drvdata(dev);
-	struct mtk_iommu_suspend_reg *reg = &data->reg;
+	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	 void __iomem *base = data->base;
 
	 reg->standard_axi_mode = readl_relaxed(base +
@@ -744,10 +739,10 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
	 return 0;
 }
 
-static int __maybe_unused mtk_iommu_resume(struct device *dev)
+static int __maybe_unused mtk_iommu_v1_resume(struct device *dev)
 {
-	struct mtk_iommu_data *data = dev_get_drvdata(dev);
-	struct mtk_iommu_suspend_reg *reg = &data->reg;
+	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	 void __iomem *base = data->base;
 
	 writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
@@ -760,20 +755,20 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
	 return 0;
 }
 
-static const struct dev_pm_ops mtk_iommu_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+static const struct dev_pm_ops mtk_iommu_v1_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume)
 };
 
-static struct platform_driver mtk_iommu_driver = {
-	.probe = mtk_iommu_probe,
-	.remove = mtk_iommu_remove,
+static struct platform_driver mtk_iommu_v1_driver = {
+	.probe = mtk_iommu_v1_probe,
+	.remove = mtk_iommu_v1_remove,
	 .driver = {
		 .name = "mtk-iommu-v1",
-		.of_match_table = mtk_iommu_of_ids,
-		.pm = &mtk_iommu_pm_ops,
+		.of_match_table = mtk_iommu_v1_of_ids,
+		.pm = &mtk_iommu_v1_pm_ops,
	 }
 };
-module_platform_driver(mtk_iommu_driver);
+module_platform_driver(mtk_iommu_v1_driver);
 
 MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
 MODULE_LICENSE("GPL v2");