swiotlb: move orig addr and size validation into swiotlb_bounce
Move the code to find and validate the original buffer address and size
from the callers into swiotlb_bounce. This means a tiny bit of extra work
in the swiotlb_map path, but avoids code duplication and leads to a better
code structure.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
commit 2bdba622c3
parent 2973073a80
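The core of the change is that swiotlb_bounce now recovers the original
buffer address and allocation size from the bounce address alone, by
computing the slot index. The following is a minimal userspace sketch of
that lookup, not kernel code: the pool base, table contents and the
bounce_model() helper are made-up illustrative values, only the index
arithmetic and the truncation check mirror the patch.

	/* Userspace model of the lookup swiotlb_bounce now performs itself. */
	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define IO_TLB_SHIFT		11	/* 2 KiB slots, as in the kernel */
	#define INVALID_PHYS_ADDR	(~(uint64_t)0)

	static uint64_t io_tlb_start = 0x80000000;	/* example pool base */
	static size_t   io_tlb_alloc_size[4] = { 0, 4096, 0, 0 };
	static uint64_t io_tlb_orig_addr[4] = {
		INVALID_PHYS_ADDR, 0x12345000, INVALID_PHYS_ADDR, INVALID_PHYS_ADDR,
	};

	static void bounce_model(uint64_t tlb_addr, size_t size)
	{
		/* Derive the slot index from the bounce address alone. */
		int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
		size_t alloc_size = io_tlb_alloc_size[index];
		uint64_t orig_addr = io_tlb_orig_addr[index];

		if (orig_addr == INVALID_PHYS_ADDR)
			return;		/* slot was never mapped */

		if (size > alloc_size) {
			/* Same truncation the patch moves into swiotlb_bounce. */
			printf("overflow: alloc %zu, mapping %zu -> truncating\n",
			       alloc_size, size);
			size = alloc_size;
		}
		printf("copy %zu bytes between %#llx and %#llx\n", size,
		       (unsigned long long)orig_addr, (unsigned long long)tlb_addr);
	}

	int main(void)
	{
		bounce_model(io_tlb_start + (1 << IO_TLB_SHIFT), 8192);
		return 0;
	}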
@@ -460,12 +460,25 @@ void __init swiotlb_exit(void)
 /*
  * Bounce: copy the swiotlb buffer from or back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
-			   size_t size, enum dma_data_direction dir)
+static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
+		enum dma_data_direction dir)
 {
+	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	size_t alloc_size = io_tlb_alloc_size[index];
+	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 	unsigned long pfn = PFN_DOWN(orig_addr);
 	unsigned char *vaddr = phys_to_virt(tlb_addr);
 
+	if (orig_addr == INVALID_PHYS_ADDR)
+		return;
+
+	if (size > alloc_size) {
+		dev_WARN_ONCE(dev, 1,
+			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
+			alloc_size, size);
+		size = alloc_size;
+	}
+
 	if (PageHighMem(pfn_to_page(pfn))) {
 		/* The buffer does not have a mapping. Map it in and copy */
 		unsigned int offset = orig_addr & ~PAGE_MASK;
@@ -644,21 +657,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	tlb_addr = slot_addr(io_tlb_start, index) + offset;
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
+		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
 	return tlb_addr;
 }
 
-static void validate_sync_size_and_truncate(struct device *hwdev, size_t alloc_size, size_t *size)
-{
-	if (*size > alloc_size) {
-		/* Warn and truncate mapping_size */
-		dev_WARN_ONCE(hwdev, 1,
-			"Attempt for buffer overflow. Original size: %zu. Mapping size: %zu.\n",
-			alloc_size, *size);
-		*size = alloc_size;
-	}
-}
-
 /*
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
@@ -669,19 +671,15 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	unsigned long flags;
 	unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
 	int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
-	phys_addr_t orig_addr = io_tlb_orig_addr[index];
-	size_t alloc_size = io_tlb_alloc_size[index];
-	int i, count, nslots = nr_slots(alloc_size + offset);
-
-	validate_sync_size_and_truncate(hwdev, alloc_size, &mapping_size);
+	int nslots = nr_slots(io_tlb_alloc_size[index] + offset);
+	int count, i;
 
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (orig_addr != INVALID_PHYS_ADDR &&
-	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+		swiotlb_bounce(hwdev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -721,27 +719,16 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 		size_t size, enum dma_data_direction dir,
 		enum dma_sync_target target)
 {
-	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	size_t alloc_size = io_tlb_alloc_size[index];
-	phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
-	if (orig_addr == INVALID_PHYS_ADDR)
-		return;
-
-	validate_sync_size_and_truncate(hwdev, alloc_size, &size);
-
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(orig_addr, tlb_addr,
-				       size, DMA_FROM_DEVICE);
+			swiotlb_bounce(hwdev, tlb_addr, size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(orig_addr, tlb_addr,
-				       size, DMA_TO_DEVICE);
+			swiotlb_bounce(hwdev, tlb_addr, size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;