ARM: dma-mapping: fix for speculative prefetching
ARMv6 and ARMv7 CPUs can perform speculative prefetching, which makes DMA cache coherency handling slightly more interesting. Rather than being able to rely on the CPU not accessing the DMA buffer until DMA has completed, we now must expect that the cache could be loaded with possibly stale data from the DMA buffer.

Where DMA involves data being transferred to the device, we clean the cache before handing it over for DMA; otherwise we invalidate the buffer to get rid of potential writebacks. On DMA completion, if data was transferred from the device, we invalidate the buffer to get rid of any stale speculative prefetches.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-By: Santosh Shilimkar <santosh.shilimkar@ti.com>
commit 2ffe2da3e7 (parent 702b94bff3)
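The policy described in the commit message can be summarised in a small, self-contained C model (an illustrative sketch only: the direction values match the kernel's enum dma_data_direction, but cache_clean()/cache_inv() are placeholder names, not kernel APIs):

#include <stdio.h>

/* Same numeric values as the kernel's enum dma_data_direction. */
enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE     = 1,
        DMA_FROM_DEVICE   = 2,
};

/* Placeholder cache operations; in the kernel these are the v6/v7
 * dma_{clean,inv}_range routines plus the outer (L2) cache hooks. */
static void cache_clean(const char *what) { printf("  clean (writeback) %s\n", what); }
static void cache_inv(const char *what)   { printf("  invalidate %s\n", what); }

/* Before handing the buffer to the device ("map"). */
static void map_for_dma(enum dma_data_direction dir)
{
        if (dir == DMA_FROM_DEVICE)
                cache_inv("buffer");    /* discard dirty lines that could be written back over DMA data */
        else
                cache_clean("buffer");  /* push CPU writes out so the device sees them */
}

/* After the transfer completes ("unmap"). */
static void unmap_after_dma(enum dma_data_direction dir)
{
        if (dir != DMA_TO_DEVICE)
                cache_inv("buffer");    /* drop lines speculatively prefetched while DMA ran */
}

int main(void)
{
        enum dma_data_direction dirs[] = { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };
        const char *names[] = { "DMA_TO_DEVICE", "DMA_FROM_DEVICE", "DMA_BIDIRECTIONAL" };
        for (int i = 0; i < 3; i++) {
                printf("-- %s --\n", names[i]);
                map_for_dma(dirs[i]);
                unmap_after_dma(dirs[i]);
        }
        return 0;
}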
arch/arm/mm/cache-v6.S:
@@ -271,10 +271,9 @@ ENTRY(v6_dma_flush_range)
  */
 ENTRY(v6_dma_map_area)
        add     r1, r1, r0
-       cmp     r2, #DMA_TO_DEVICE
-       beq     v6_dma_clean_range
-       bcs     v6_dma_inv_range
-       b       v6_dma_flush_range
+       teq     r2, #DMA_FROM_DEVICE
+       beq     v6_dma_inv_range
+       b       v6_dma_clean_range
 ENDPROC(v6_dma_map_area)

 /*
@@ -284,6 +283,9 @@ ENDPROC(v6_dma_map_area)
  * - dir  - DMA direction
  */
 ENTRY(v6_dma_unmap_area)
+       add     r1, r1, r0
+       teq     r2, #DMA_TO_DEVICE
+       bne     v6_dma_inv_range
        mov     pc, lr
 ENDPROC(v6_dma_unmap_area)

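In these routines r0 holds the start address, r1 the size (the add r1, r1, r0 turns it into an end address) and r2 the DMA direction. The old cmp/beq/bcs sequence was a three-way dispatch over the kernel's direction values (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2); the new teq/beq collapses it to two cases. A small self-contained C check of what each sequence selects (a sketch, not kernel code; the v6/v7 prefixes on the branch targets are dropped):

#include <stdio.h>

enum { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };

/* Old map-time dispatch: cmp r2, #DMA_TO_DEVICE; beq clean; bcs inv; b flush.
 * "eq" means r2 == 1, "cs" means unsigned r2 >= 1. */
static const char *old_map(unsigned r2)
{
        if (r2 == DMA_TO_DEVICE)  return "dma_clean_range";
        if (r2 >= DMA_TO_DEVICE)  return "dma_inv_range";   /* only FROM_DEVICE gets here */
        return "dma_flush_range";                           /* BIDIRECTIONAL */
}

/* New map-time dispatch: teq r2, #DMA_FROM_DEVICE; beq inv; b clean. */
static const char *new_map(unsigned r2)
{
        return r2 == DMA_FROM_DEVICE ? "dma_inv_range" : "dma_clean_range";
}

int main(void)
{
        const char *names[] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", "DMA_FROM_DEVICE" };
        for (unsigned d = 0; d <= 2; d++)
                printf("%-17s  old: %-15s  new: %s\n", names[d], old_map(d), new_map(d));
        return 0;
}

Only the bidirectional case changes (flush becomes clean): the invalidate half of the old flush moves to the new unmap-time code, because a speculative prefetch made while the DMA is in flight would defeat an invalidate done at map time anyway. The v7 hunks below make the identical change for ARMv7.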
arch/arm/mm/cache-v7.S:
@@ -279,10 +279,9 @@ ENDPROC(v7_dma_flush_range)
  */
 ENTRY(v7_dma_map_area)
        add     r1, r1, r0
-       cmp     r2, #DMA_TO_DEVICE
-       beq     v7_dma_clean_range
-       bcs     v7_dma_inv_range
-       b       v7_dma_flush_range
+       teq     r2, #DMA_FROM_DEVICE
+       beq     v7_dma_inv_range
+       b       v7_dma_clean_range
 ENDPROC(v7_dma_map_area)

 /*
@@ -292,6 +291,9 @@ ENDPROC(v7_dma_map_area)
  * - dir  - DMA direction
  */
 ENTRY(v7_dma_unmap_area)
+       add     r1, r1, r0
+       teq     r2, #DMA_TO_DEVICE
+       bne     v7_dma_inv_range
        mov     pc, lr
 ENDPROC(v7_dma_unmap_area)

arch/arm/mm/dma-mapping.c:
@@ -404,34 +404,22 @@ EXPORT_SYMBOL(dma_free_coherent);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-static void dma_cache_maint(const void *start, size_t size, int direction)
-{
-       void (*outer_op)(unsigned long, unsigned long);
-
-       switch (direction) {
-       case DMA_FROM_DEVICE:           /* invalidate only */
-               outer_op = outer_inv_range;
-               break;
-       case DMA_TO_DEVICE:             /* writeback only */
-               outer_op = outer_clean_range;
-               break;
-       case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
-               outer_op = outer_flush_range;
-               break;
-       default:
-               BUG();
-       }
-
-       outer_op(__pa(start), __pa(start) + size);
-}
-
 void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
        enum dma_data_direction dir)
 {
+       unsigned long paddr;
+
        BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

        dmac_map_area(kaddr, size, dir);
-       dma_cache_maint(kaddr, size, dir);
+
+       paddr = __pa(kaddr);
+       if (dir == DMA_FROM_DEVICE) {
+               outer_inv_range(paddr, paddr + size);
+       } else {
+               outer_clean_range(paddr, paddr + size);
+       }
+       /* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 EXPORT_SYMBOL(___dma_single_cpu_to_dev);

@@ -440,6 +428,13 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
 {
        BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

+       /* FIXME: non-speculating: not required */
+       /* don't bother invalidating if DMA to device */
+       if (dir != DMA_TO_DEVICE) {
+               unsigned long paddr = __pa(kaddr);
+               outer_inv_range(paddr, paddr + size);
+       }
+
        dmac_unmap_area(kaddr, size, dir);
 }
 EXPORT_SYMBOL(___dma_single_dev_to_cpu);
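The two hunks above change the single-buffer paths; the ___dma_single_* helpers sit behind the streaming DMA API on non-coherent ARM systems. For context, this is roughly how a driver exercises that path (an illustrative sketch; the device and buffer names are hypothetical, and the exact call chain also depends on whether CONFIG_DMABOUNCE is in use):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical example: receive 'len' bytes from a device into 'buf'. */
static int example_rx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* Map for a device-to-memory transfer: on this path the map-time
         * maintenance invalidates the CPU caches for 'buf'. */
        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... program the device with 'handle' and wait for completion ... */

        /* Unmap: invalidates again, discarding anything the CPU may have
         * speculatively prefetched while the DMA was in flight. */
        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
        return 0;
}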
@@ -487,32 +482,29 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
 {
        unsigned long paddr;
-       void (*outer_op)(unsigned long, unsigned long);
-
-       switch (direction) {
-       case DMA_FROM_DEVICE:           /* invalidate only */
-               outer_op = outer_inv_range;
-               break;
-       case DMA_TO_DEVICE:             /* writeback only */
-               outer_op = outer_clean_range;
-               break;
-       case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
-               outer_op = outer_flush_range;
-               break;
-       default:
-               BUG();
-       }

        dma_cache_maint_page(page, off, size, dir, dmac_map_area);

        paddr = page_to_phys(page) + off;
-       outer_op(paddr, paddr + size);
+       if (dir == DMA_FROM_DEVICE) {
+               outer_inv_range(paddr, paddr + size);
+       } else {
+               outer_clean_range(paddr, paddr + size);
+       }
+       /* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 EXPORT_SYMBOL(___dma_page_cpu_to_dev);

 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
 {
+       unsigned long paddr = page_to_phys(page) + off;
+
+       /* FIXME: non-speculating: not required */
+       /* don't bother invalidating if DMA to device */
+       if (dir != DMA_TO_DEVICE)
+               outer_inv_range(paddr, paddr + size);
+
        dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
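The page-based helpers back dma_map_page()/dma_unmap_page() and, together with the single-buffer helpers, the dma_sync_* calls mentioned in the comment above. A hedged sketch of reusing one mapped page as a receive buffer (hypothetical names; whether every sync call reaches these helpers depends on the platform's DMA configuration):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical example: map one page once and reuse it for several
 * transfers, using dma_sync_* to hand ownership back and forth. */
static int example_rx_ring_slot(struct device *dev, struct page *page,
                                unsigned long offset, size_t len, int rounds)
{
        dma_addr_t handle;
        int i;

        handle = dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        for (i = 0; i < rounds; i++) {
                /* Hand the buffer to the device: runs the same map-time
                 * maintenance as the initial mapping. */
                dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

                /* ... device fills the buffer ... */

                /* Take it back: invalidate so speculative prefetches made
                 * during the transfer are not returned to the CPU. */
                dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

                /* ... CPU processes the received data ... */
        }

        dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
        return 0;
}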