/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */
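
/*
 * Illustrative sketch only (not part of the original file): a driver would
 * typically reach arc_dma_alloc()/arc_dma_free() below through the generic
 * DMA API. The device pointer, buffer size and variable names here are
 * hypothetical.
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_buf;
 *
 *	cpu_buf = dma_alloc_coherent(&pdev->dev, BUF_SZ, &dma_handle, GFP_KERNEL);
 *	if (!cpu_buf)
 *		return -ENOMEM;
 *	(CPU accesses the buffer via cpu_buf, the device via dma_handle)
 *	dma_free_coherent(&pdev->dev, BUF_SZ, cpu_buf, dma_handle);
 */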

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache,
	 * thus allocate normal cached memory.
	 *
	 * The gains with IOC are two pronged:
	 *   - For streaming data, it elides the need for cache maintenance,
	 *     saving cycles in flush code and bus bandwidth, as all the lines
	 *     of a buffer would otherwise need to be flushed out to memory.
	 *   - For coherent data, reads/writes to buffers terminate early in
	 *     cache (vs. always going to memory) and are thus faster.
	 */
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;

	/*
	 * - A coherent buffer needs MMU mapping to enforce non-cacheability.
	 * - A highmem page needs a virtual handle (hence MMU mapping),
	 *   independent of cacheability.
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = plat_phys_to_dma(dev, paddr);

	/* This is kernel Virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr.
	 * Currently flush_cache_vmap nukes the L1 cache completely, which
	 * will be optimized as a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
	struct page *page = virt_to_page(paddr);
	int is_non_coh = 1;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Streaming DMA Mapping API...
 * CPU accesses the page via its normal paddr, so it needs to be explicitly
 * made consistent before each use.
 */
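/*
 * Illustrative sketch only (not part of the original file): a streaming
 * mapping set up by a driver ends up in arc_dma_map_page() and
 * _dma_cache_sync() below. Device, buffer and length names are hypothetical.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		return -ENOMEM;
 *	(device reads @len bytes at @dma)
 *	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
 */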
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		_dma_cache_sync(paddr, size, dir);

	return plat_phys_to_dma(dev, paddr);
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

const struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.mmap			= arc_dma_mmap,
	.map_page		= arc_dma_map_page,
	.map_sg			= arc_dma_map_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
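
/*
 * A minimal sketch, assuming the usual wiring for this kernel generation
 * (not shown in this file): the arch's asm/dma-mapping.h is expected to
 * return this table from get_arch_dma_ops(), so the generic dma_alloc_*()
 * and dma_map_*() wrappers used by drivers resolve to the arc_dma_*
 * functions above.
 *
 *	static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &arc_dma_ops;
 *	}
 */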