/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */
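
/*
 * Driver-side view (illustrative sketch, not part of this file's API):
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &bus, GFP_KERNEL);
 *
 * "cpu" is the kernel virtual address returned by arch_dma_alloc() below
 * (uncached unless IOC is in play); "bus" is the 0x8000_0000 based address
 * programmed into the device. The buffer is released with
 * dma_free_coherent(dev, SZ_4K, cpu, bus).
 */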

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   - For streaming data, elides need for cache maintenance, saving
	 *     cycles in flush code, and bus bandwidth as all the lines of a
	 *     buffer need to be flushed out to memory
	 *   - For coherent data, Read/Write to buffers terminate early in cache
	 *     (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;
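
	/*
	 * need_coh == 0 means the buffer may stay in normal cached memory:
	 * either the IOC hardware keeps DMA coherent, or the caller passed
	 * DMA_ATTR_NON_CONSISTENT and handles the syncing itself.
	 */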

	/*
	 * - A coherent buffer needs MMU mapping to enforce non-cachability
	 * - A highmem page needs a virtual handle (hence MMU mapping)
	 *   independent of cachability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/* This is kernel Virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}
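
	/*
	 * Note on the !need_kvaddr case above: the page is low memory, so
	 * its 0x8000_0000 based linear address is directly usable by the
	 * kernel and no separate uncached mapping is needed.
	 */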

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr.
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}
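
/*
 * No per-buffer state is kept, so the free path recomputes (from attrs and
 * the IOC setting) whether arch_dma_alloc() created an ioremap_nocache()
 * mapping that now needs to be torn down with iounmap().
 */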
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);
	int is_non_coh = 1;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}
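
/*
 * Map the coherent buffer into userspace. The user PTEs are marked uncached
 * via pgprot_noncached() so userspace accesses go straight to memory rather
 * than through the CPU cache.
 */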
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
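
	/*
	 * Only map if the requested window (vm_pgoff pages into the buffer,
	 * user_count pages long) fits within the count pages backing the
	 * allocation; otherwise -ENXIO is returned.
	 */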
	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
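
/*
 * Called before the device accesses a streaming buffer: write back any
 * dirty CPU cache lines so the device reads up-to-date data from memory.
 */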
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_wback(paddr, size);
}
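
/*
 * Called before the CPU reads a streaming buffer the device may have
 * written: invalidate the CPU cache lines so stale data is not returned.
 */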
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_inv(paddr, size);
}