/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
*/
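
/*
 * m68k DMA support: cache-maintenance and coherent-allocation hooks
 * backing the generic dma-noncoherent code.
 */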
#undef DEBUG

#include <linux/dma-noncoherent.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
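
/*
 * Write back (push) any dirty cache lines covering the buffer before it
 * is handed out as a coherent allocation.
 */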
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_push(page_to_phys(page), size);
}
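
/*
 * Page protection for an uncached mapping: '040/'060 use the serialized
 * non-cacheable mode, older CPUs use the '030 no-cache bit.
 */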
pgprot_t pgprot_dmacoherent(pgprot_t prot)
{
	if (CPU_IS_040_OR_060) {
		pgprot_val(prot) &= ~_PAGE_CACHE040;
		pgprot_val(prot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	} else {
		pgprot_val(prot) |= _PAGE_NOCACHE030;
	}
	return prot;
}
#else

#include <asm/cacheflush.h>
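
/*
 * Without an MMU (or on ColdFire) coherent memory cannot be remapped, so
 * hand out zeroed free pages directly, restricted to GFP_DMA when the
 * device cannot address the whole 32-bit space.
 */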
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}
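
/* Free a buffer handed out by arch_dma_alloc() above. */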
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
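
/*
 * Cache maintenance for streaming DMA: push (write back) dirty lines
 * before the device reads the buffer, clear (invalidate) lines before
 * the device writes it, so the CPU does not see stale data afterwards.
 */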
void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
				   dir);
		break;
	}
}
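
/*
 * None of the hooks above are called by drivers directly; they back the
 * generic DMA API.  As a rough sketch of the calling paths (illustration
 * only, not code from this file):
 *
 *	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *		-> arch_dma_alloc() or arch_dma_prep_coherent()
 *
 *	handle = dma_map_single(dev, ptr, size, DMA_TO_DEVICE);
 *		-> arch_sync_dma_for_device()
 */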