swiotlb: split swiotlb_tbl_sync_single
Split swiotlb_tbl_sync_single into two separate functions for the to-device and to-cpu synchronization.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 2bdba622c3
commit 80808d273a
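In short, the patch replaces the single, target-based sync helper with two direction-specific helpers. A minimal sketch of the resulting interface, summarized from the header and swiotlb implementation hunks below (not part of the patch text itself):

/* Removed: one entry point selected by an explicit sync target. */
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir,
                enum dma_sync_target target);

/*
 * Added: one helper per bounce direction. swiotlb_sync_single_for_device()
 * bounces the buffer towards the device (a DMA_TO_DEVICE copy), and
 * swiotlb_sync_single_for_cpu() bounces it back to the CPU
 * (a DMA_FROM_DEVICE copy).
 */
void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir);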
@@ -750,7 +750,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
                 arch_sync_dma_for_cpu(phys, size, dir);
 
         if (is_swiotlb_buffer(phys))
-                swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
+                swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -763,7 +763,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 
         phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
         if (is_swiotlb_buffer(phys))
-                swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+                swiotlb_sync_single_for_device(dev, phys, size, dir);
 
         if (!dev_is_dma_coherent(dev))
                 arch_sync_dma_for_device(phys, size, dir);
@@ -784,8 +784,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
                         arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 
                 if (is_swiotlb_buffer(sg_phys(sg)))
-                        swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
-                                        dir, SYNC_FOR_CPU);
+                        swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
+                                        sg->length, dir);
         }
 }
 
@@ -801,8 +801,8 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 
         for_each_sg(sgl, sg, nelems, i) {
                 if (is_swiotlb_buffer(sg_phys(sg)))
-                        swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
-                                        dir, SYNC_FOR_DEVICE);
+                        swiotlb_sync_single_for_device(dev, sg_phys(sg),
+                                        sg->length, dir);
 
                 if (!dev_is_dma_coherent(dev))
                         arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
@@ -462,7 +462,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
         }
 
         if (is_xen_swiotlb_buffer(dev, dma_addr))
-                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+                swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 }
 
 static void
@@ -472,7 +472,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
         phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 
         if (is_xen_swiotlb_buffer(dev, dma_addr))
-                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+                swiotlb_sync_single_for_device(dev, paddr, size, dir);
 
         if (!dev_is_dma_coherent(dev)) {
                 if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
@@ -42,14 +42,6 @@ extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 extern void __init swiotlb_update_mem_attributes(void);
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-        SYNC_FOR_CPU = 0,
-        SYNC_FOR_DEVICE = 1,
-};
-
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
                 size_t mapping_size, size_t alloc_size,
                 enum dma_data_direction dir, unsigned long attrs);
@@ -60,11 +52,10 @@ extern void swiotlb_tbl_unmap_single(struct device *hwdev,
                                      enum dma_data_direction dir,
                                      unsigned long attrs);
 
-extern void swiotlb_tbl_sync_single(struct device *hwdev,
-                                    phys_addr_t tlb_addr,
-                                    size_t size, enum dma_data_direction dir,
-                                    enum dma_sync_target target);
-
+void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+                size_t size, enum dma_data_direction dir);
+void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+                size_t size, enum dma_data_direction dir);
 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
                 size_t size, enum dma_data_direction dir, unsigned long attrs);
 
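Because enum dma_sync_target and the old prototype are gone from the header above, any remaining caller converts mechanically. A hedged illustration of the pattern applied throughout this diff (dev, paddr, size and dir stand in for whatever values the caller already has):

        /* before this patch */
        if (is_swiotlb_buffer(paddr))
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);

        /* after this patch */
        if (is_swiotlb_buffer(paddr))
                swiotlb_sync_single_for_cpu(dev, paddr, size, dir);

The SYNC_FOR_DEVICE case maps to swiotlb_sync_single_for_device() in the same way.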
@@ -344,8 +344,8 @@ void dma_direct_sync_sg_for_device(struct device *dev,
                 phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 
                 if (unlikely(is_swiotlb_buffer(paddr)))
-                        swiotlb_tbl_sync_single(dev, paddr, sg->length,
-                                        dir, SYNC_FOR_DEVICE);
+                        swiotlb_sync_single_for_device(dev, paddr, sg->length,
+                                        dir);
 
                 if (!dev_is_dma_coherent(dev))
                         arch_sync_dma_for_device(paddr, sg->length,
@@ -370,8 +370,8 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
                         arch_sync_dma_for_cpu(paddr, sg->length, dir);
 
                 if (unlikely(is_swiotlb_buffer(paddr)))
-                        swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
-                                        SYNC_FOR_CPU);
+                        swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
+                                        dir);
 
                 if (dir == DMA_FROM_DEVICE)
                         arch_dma_mark_clean(paddr, sg->length);
@@ -57,7 +57,7 @@ static inline void dma_direct_sync_single_for_device(struct device *dev,
         phys_addr_t paddr = dma_to_phys(dev, addr);
 
         if (unlikely(is_swiotlb_buffer(paddr)))
-                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+                swiotlb_sync_single_for_device(dev, paddr, size, dir);
 
         if (!dev_is_dma_coherent(dev))
                 arch_sync_dma_for_device(paddr, size, dir);
@@ -74,7 +74,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
         }
 
         if (unlikely(is_swiotlb_buffer(paddr)))
-                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+                swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 
         if (dir == DMA_FROM_DEVICE)
                 arch_dma_mark_clean(paddr, size);
@@ -715,26 +715,22 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
         spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
 
-void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
-                             size_t size, enum dma_data_direction dir,
-                             enum dma_sync_target target)
+void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+                size_t size, enum dma_data_direction dir)
 {
-        switch (target) {
-        case SYNC_FOR_CPU:
-                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                        swiotlb_bounce(hwdev, tlb_addr, size, DMA_FROM_DEVICE);
-                else
-                        BUG_ON(dir != DMA_TO_DEVICE);
-                break;
-        case SYNC_FOR_DEVICE:
-                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                        swiotlb_bounce(hwdev, tlb_addr, size, DMA_TO_DEVICE);
-                else
-                        BUG_ON(dir != DMA_FROM_DEVICE);
-                break;
-        default:
-                BUG();
-        }
+        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+                swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
+        else
+                BUG_ON(dir != DMA_FROM_DEVICE);
+}
+
+void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+                size_t size, enum dma_data_direction dir)
+{
+        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+                swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
+        else
+                BUG_ON(dir != DMA_TO_DEVICE);
 }
 
 /*