// SPDX-License-Identifier: GPL-2.0-only
/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>
/*
 * make an area consistent.
 */
static void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
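/*
 * Illustrative example (assuming L1_CACHE_BYTES == 32): a DMA_FROM_DEVICE
 * sync whose start or end falls in the middle of a cache line takes the
 * flush_dcache_range() path above, writing the partial lines back before
 * invalidating them, so dirty CPU data that shares those lines with the
 * buffer is not silently discarded.  Only fully line-aligned buffers are
 * invalidated outright.
 */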
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
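/*
 * Worked example (assuming 4 KiB pages): a sync with offset 0x800 and size
 * 0x1800 gives seg_size = min(0x1000 - 0x800, 0x1800) = 0x800 for the first
 * page and nr_segs = 1 + (0x1000 + 0xfff) / 0x1000 = 2, so the loop below
 * maps page 0 (syncing 0x800 bytes from offset 0x800) and then page 1
 * (syncing the remaining 0x1000 bytes from offset 0).
 */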
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1) / PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
/*
 * __dma_sync_page makes memory consistent. Identical to __dma_sync, but
 * takes a physical address (and hence a struct page) instead of a virtual
 * address.
 */
static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;

#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, dir);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;

	__dma_sync((void *)start, size, dir);
#endif
}
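/*
 * Entry points used by the generic DMA mapping code on cache-incoherent
 * platforms: arch_sync_dma_for_device() runs before a buffer is handed to
 * the device and arch_sync_dma_for_cpu() after the device is done with it.
 * On this hardware both reduce to the same per-direction cache maintenance
 * in __dma_sync_page().
 */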
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}
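/*
 * Called by the generic DMA allocator on a freshly allocated buffer before
 * it is handed out as coherent memory; flushing the kernel linear-map alias
 * here ensures no dirty cache lines are left behind for memory that will
 * subsequently be accessed uncached.
 */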
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	flush_dcache_range(kaddr, kaddr + size);
}