iommu: Remove struct iommu_ops *iommu from arch_setup_dma_ops()
This is not being used to pass ops, it is just a way to tell if an
iommu driver was probed. These days this can be detected directly via
device_iommu_mapped(). Call device_iommu_mapped() in the two places that
need to check it and remove the iommu parameter everywhere.

Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Moritz Fischer <mdf@kernel.org>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Rob Herring <robh@kernel.org>
Tested-by: Hector Martin <marcan@marcan.st>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v2-16e4def25ebb+820-iommu_fwspec_p1_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
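For context, device_iommu_mapped() is a one-line inline helper in
include/linux/device.h: it reports whether an IOMMU driver attached the
device to an iommu_group at probe time, which is exactly the yes/no signal
the old iommu pointer was carrying. A sketch of the helper as it reads
around this series, reproduced here for reference only (not part of the
diff below):

/* include/linux/device.h -- as of this series */
static inline bool device_iommu_mapped(struct device *dev)
{
	/* non-NULL iommu_group means an IOMMU driver probed this device */
	return (dev->iommu_group != NULL);
}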
commit 4720287c7b
parent 7be423336e
arch/arc/mm/dma.c
@@ -91,7 +91,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
  * Plug in direct dma map ops.
  */
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			const struct iommu_ops *iommu, bool coherent)
+			bool coherent)
 {
 	/*
 	 * IOC hardware snoops all DMA traffic keeping the caches consistent
arch/arm/mm/dma-mapping-nommu.c
@@ -34,7 +34,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			const struct iommu_ops *iommu, bool coherent)
+			bool coherent)
 {
 	if (IS_ENABLED(CONFIG_CPU_V7M)) {
 		/*
arch/arm/mm/dma-mapping.c
@@ -1713,7 +1713,7 @@ void arm_iommu_detach_device(struct device *dev)
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    const struct iommu_ops *iommu, bool coherent)
+				    bool coherent)
 {
 	struct dma_iommu_mapping *mapping;
 
@@ -1748,7 +1748,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 #else
 
 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    const struct iommu_ops *iommu, bool coherent)
+				    bool coherent)
 {
 }
 
@@ -1757,7 +1757,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			const struct iommu_ops *iommu, bool coherent)
+			bool coherent)
 {
 	/*
 	 * Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1776,8 +1776,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	if (dev->dma_ops)
 		return;
 
-	if (iommu)
-		arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
+	if (device_iommu_mapped(dev))
+		arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
 
 	xen_setup_dma_ops(dev);
 	dev->archdata.dma_ops_setup = true;
arch/arm64/mm/dma-mapping.c
@@ -47,7 +47,7 @@ void arch_teardown_dma_ops(struct device *dev)
 #endif
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			const struct iommu_ops *iommu, bool coherent)
+			bool coherent)
 {
 	int cls = cache_line_size_of_cpu();
 
@@ -58,7 +58,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			   ARCH_DMA_MINALIGN, cls);
 
 	dev->dma_coherent = coherent;
-	if (iommu)
+	if (device_iommu_mapped(dev))
 		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 
 	xen_setup_dma_ops(dev);
arch/mips/mm/dma-noncoherent.c
@@ -138,7 +138,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-		const struct iommu_ops *iommu, bool coherent)
+		bool coherent)
 {
 	dev->dma_coherent = coherent;
 }
arch/riscv/mm/dma-noncoherent.c
@@ -129,7 +129,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-		const struct iommu_ops *iommu, bool coherent)
+		bool coherent)
 {
 	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
 		   TAINT_CPU_OUT_OF_SPEC,
drivers/acpi/scan.c
@@ -1641,8 +1641,7 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
 		if (PTR_ERR(iommu) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 
-	arch_setup_dma_ops(dev, 0, U64_MAX,
-			   iommu, attr == DEV_DMA_COHERENT);
+	arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
 
 	return 0;
 }
drivers/hv/hv_common.c
@@ -488,7 +488,7 @@ void hv_setup_dma_ops(struct device *dev, bool coherent)
 	 * Hyper-V does not offer a vIOMMU in the guest
 	 * VM, so pass 0/NULL for the IOMMU settings
 	 */
-	arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+	arch_setup_dma_ops(dev, 0, 0, coherent);
 }
 EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
 
drivers/of/device.c
@@ -193,7 +193,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 	dev_dbg(dev, "device is%sbehind an iommu\n",
 		iommu ? " " : " not ");
 
-	arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
+	arch_setup_dma_ops(dev, dma_start, size, coherent);
 
 	if (!iommu)
 		of_dma_set_restricted_buffer(dev, np);
include/linux/dma-map-ops.h
@@ -427,10 +427,10 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-		const struct iommu_ops *iommu, bool coherent);
+		bool coherent);
 #else
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-		u64 size, const struct iommu_ops *iommu, bool coherent)
+		u64 size, bool coherent)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */