// SPDX-License-Identifier: GPL-2.0-only
/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif
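
/*
 * With STATS enabled, reading the dmabounce_stats attribute yields the six
 * counters in the order printed above: small-pool allocations, large-pool
 * allocations, coherent (un-pooled) allocations, total allocations, map
 * operations and actual bounces.
 */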

/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}
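
/*
 * The per-device needs_bounce() hook above supplements the generic DMA
 * mask check.  A minimal sketch of such a platform callback, bouncing
 * anything that ends beyond an assumed 64MB DMA window (the limit and
 * the function name are illustrative, not taken from a real platform):
 *
 *	static int example_needs_bounce(struct device *dev,
 *					dma_addr_t dma_addr, size_t size)
 *	{
 *		return (dma_addr + size) > SZ_64M;
 *	}
 */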

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS(device_info->map_op_count++);

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return DMA_MAPPING_ERROR;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
				size_t size, enum dma_data_direction dir,
				unsigned long attrs)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_MAPPING_ERROR;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_MAPPING_ERROR;
	}

	return map_single(dev, page_address(page) + offset, size, dir, attrs);
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir, attrs);
}

static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.dma_supported(dev, dma_mask);
}

static const struct dma_map_ops dmabounce_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= dmabounce_dma_supported,
};
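
/*
 * These ops are installed per-device by dmabounce_register_dev() via
 * set_dma_ops().  The alloc/free/mmap/get_sgtable and scatter-gather
 * entries are the stock arm_dma_* helpers; the page map/unmap and
 * single-sync entries go through the safe-buffer pools only when
 * needs_bounce() says so, and otherwise defer to arm_dma_ops.
 */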

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
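
/*
 * Registration is typically done from platform or bus setup code, which
 * picks the two pool sizes and supplies the bounce predicate.  A sketch
 * (the sizes and the callback name are illustrative only):
 *
 *	if (dmabounce_register_dev(dev, 512, 4096, example_needs_bounce))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *
 * dmabounce_unregister_dev() below undoes this once all mappings have
 * been torn down.
 */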

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");