iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers
The remaining internal callsites don't care about having prototypes compatible with the relevant dma_map_ops callbacks, so the extra level of indirection just wastes space and complicates things.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent b61d271e59
commit 796a08cf16
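
The squash is easiest to see in isolation: a wrapper whose only job is to repackage its arguments for an internal worker adds a call level without adding meaning. The sketch below is a minimal standalone illustration of that pattern, assuming placeholder typedefs and hypothetical my_map()/my_map_page() helpers; none of these names are the kernel's own.

/* Sketch of the pattern this commit removes; placeholder types and
 * helpers, not the real kernel definitions. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

/* The worker: maps a physical range and returns a bus address. */
static dma_addr_t my_map(phys_addr_t phys, size_t size, int prot)
{
        (void)size; (void)prot;
        return phys;    /* identity "mapping" is enough for the sketch */
}

/* Before: a wrapper that only converts (base, offset) into a physical
 * address, kept around to mirror a dma_map_ops-style prototype that the
 * internal callers never actually needed. */
static dma_addr_t my_map_page(phys_addr_t base, unsigned long offset,
                size_t size, int prot)
{
        return my_map(base + offset, size, prot);
}

int main(void)
{
        dma_addr_t via_wrapper = my_map_page(0x1000, 0, 64, 0);
        /* After the squash, callers fold the trivial argument
         * conversion in themselves and call the worker directly: */
        dma_addr_t direct = my_map(0x1000 + 0, 64, 0);
        printf("%#llx %#llx\n", (unsigned long long)via_wrapper,
                        (unsigned long long)direct);
        return 0;
}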
@@ -717,18 +717,6 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
 }
 
-static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, int prot)
-{
-       return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
-}
-
-static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-       __iommu_dma_unmap(dev, handle, size);
-}
-
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
@@ -974,7 +962,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
                if (!addr)
                        return NULL;
 
-               *handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+               *handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
+                               ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        if (coherent)
                                __free_pages(page, get_order(size));
@@ -991,7 +980,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
                if (!page)
                        return NULL;
 
-               *handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+               *handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        dma_release_from_contiguous(dev, page,
                                        size >> PAGE_SHIFT);
@@ -1005,7 +994,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
                        arch_dma_prep_coherent(page, iosize);
                        memset(addr, 0, size);
                } else {
-                       __iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+                       __iommu_dma_unmap(dev, *handle, iosize);
                        dma_release_from_contiguous(dev, page,
                                        size >> PAGE_SHIFT);
                }
@@ -1044,12 +1033,12 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
         * Hence how dodgy the below logic looks...
         */
        if (dma_in_atomic_pool(cpu_addr, size)) {
-               __iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+               __iommu_dma_unmap(dev, handle, iosize);
                dma_free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);
 
-               __iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+               __iommu_dma_unmap(dev, handle, iosize);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)){
@@ -1060,7 +1049,7 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
                __iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
-               __iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+               __iommu_dma_unmap(dev, handle, iosize);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
 }
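
For reference, the prototypes of the surviving helpers can be inferred from the removed wrapper bodies and the new callsites above; the declarations below are reconstructed from this diff rather than quoted from the tree, so treat them as an inference:

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot);
static void __iommu_dma_unmap(struct device *dev, dma_addr_t handle,
                size_t size);

With those in hand, every former __iommu_dma_map_page(dev, page, 0, iosize, ioprot) caller becomes a direct __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot), and the unmap paths simply drop the dir/attrs arguments the wrapper never used.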