x86: remove arch specific dma_supported implementation
Instead, wire it up as a ->dma_supported method on all of the x86 dma_map_ops instances. Note that this also means the arch-specific check is now applied fully, rather than only partially, in the AMD IOMMU driver: previously only the checks ahead of the ops->dma_supported dispatch in the x86 dma_supported() ran for devices behind the AMD IOMMU, whereas amd_iommu_dma_supported() now calls x86_dma_supported() itself, so all of them apply.

Signed-off-by: Christoph Hellwig <hch@lst.de>
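For context, a minimal sketch of what the generic dispatch looks like once the arch override is gone; this roughly mirrors the include/linux/dma-mapping.h helper of this era, but the exact fallback values shown here are an assumption, not part of this patch:

/*
 * Sketch only: with HAVE_ARCH_DMA_SUPPORTED gone, the common helper can
 * defer straight to the per-device dma_map_ops method when one is provided.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

With that dispatch in place, each dma_map_ops instance touched below either points .dma_supported at x86_dma_supported directly or, as in the AMD IOMMU driver, calls it before its own device check.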
@@ -33,9 +33,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
 
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *hwdev, u64 mask);
-
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag,
 					unsigned long attrs);

@@ -6,6 +6,8 @@ extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;
 
+int x86_dma_supported(struct device *dev, u64 mask);
+
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
 
@@ -704,6 +704,7 @@ static const struct dma_map_ops gart_dma_ops = {
 	.alloc				= gart_alloc_coherent,
 	.free				= gart_free_coherent,
 	.mapping_error			= gart_mapping_error,
+	.dma_supported			= x86_dma_supported,
 };
 
 static void gart_iommu_shutdown(void)

@@ -493,6 +493,7 @@ static const struct dma_map_ops calgary_dma_ops = {
 	.map_page = calgary_map_page,
 	.unmap_page = calgary_unmap_page,
 	.mapping_error = calgary_mapping_error,
+	.dma_supported = x86_dma_supported,
 };
 
 static inline void __iomem * busno_to_bbar(unsigned char num)

@@ -213,10 +213,8 @@ static __init int iommu_setup(char *p)
 }
 early_param("iommu", iommu_setup);
 
-int dma_supported(struct device *dev, u64 mask)
+int x86_dma_supported(struct device *dev, u64 mask)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
 		dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -224,9 +222,6 @@ int dma_supported(struct device *dev, u64 mask)
 	}
 #endif
 
-	if (ops->dma_supported)
-		return ops->dma_supported(dev, mask);
-
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
 	   The caller just has to use GFP_DMA in this case. */
@@ -252,7 +247,6 @@ int dma_supported(struct device *dev, u64 mask)
 
 	return 1;
 }
-EXPORT_SYMBOL(dma_supported);
 
 static int __init pci_iommu_init(void)
 {

@@ -104,4 +104,5 @@ const struct dma_map_ops nommu_dma_ops = {
 	.sync_sg_for_device	= nommu_sync_sg_for_device,
 	.is_phys		= 1,
 	.mapping_error		= nommu_mapping_error,
+	.dma_supported		= x86_dma_supported,
 };

@@ -26,6 +26,7 @@
 #include <linux/pci_ids.h>
 #include <linux/export.h>
 #include <linux/list.h>
+#include <asm/iommu.h>
 
 #define STA2X11_SWIOTLB_SIZE (4*1024*1024)
 extern int swiotlb_late_init_with_default_size(size_t default_size);
@@ -191,7 +192,7 @@ static const struct dma_map_ops sta2x11_dma_ops = {
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.mapping_error = swiotlb_dma_mapping_error,
-	.dma_supported = NULL, /* FIXME: we should use this instead! */
+	.dma_supported = x86_dma_supported,
 };
 
 /* At setup time, we use our own ops if the device is a ConneXt one */

@@ -2731,6 +2731,8 @@ free_mem:
  */
 static int amd_iommu_dma_supported(struct device *dev, u64 mask)
 {
+	if (!x86_dma_supported(dev, mask))
+		return 0;
 	return check_device(dev);
 }
 
@@ -3981,6 +3981,9 @@ struct dma_map_ops intel_dma_ops = {
 	.map_page = intel_map_page,
 	.unmap_page = intel_unmap_page,
 	.mapping_error = intel_mapping_error,
+#ifdef CONFIG_X86
+	.dma_supported = x86_dma_supported,
+#endif
 };
 
 static inline int iommu_domain_cache_init(void)