// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/dma-noncoherent.h>
#include <linux/cache.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>
/*
 * Apply a CPU cache maintenance routine @fn to the physical range
 * [@paddr, @paddr + @size), walking it page by page so that highmem
 * pages can be temporarily mapped with kmap_atomic().
 *
 * @fn receives virtual [start, end) bounds for each chunk (e.g. one of
 * the cpu_dma_*_range primitives).
 */
static inline void cache_op(phys_addr_t paddr, size_t size,
		void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	/* Byte offset of paddr within its page; always < PAGE_SIZE here. */
	unsigned offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			/*
			 * Highmem pages must be processed one page at a
			 * time: clamp len so the chunk does not cross the
			 * page boundary of the atomic mapping.
			 */
			if (offset + len > PAGE_SIZE) {
				/*
				 * NOTE(review): offset starts < PAGE_SIZE
				 * and is reset to 0 after the first
				 * iteration, so this branch looks
				 * unreachable — confirm before removing.
				 */
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem is virtually contiguous: operate on the
			 * whole remaining range in one call. len == left
			 * here, so left drops to zero below and the loop
			 * terminates after this iteration.
			 */
			start = (unsigned long)phys_to_virt(paddr);
			fn(start, start + size);
		}
		/* Subsequent pages start at offset 0. */
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
2019-11-07 18:03:11 +01:00
void arch_sync_dma_for_device ( phys_addr_t paddr , size_t size ,
enum dma_data_direction dir )
2017-10-24 16:22:35 +08:00
{
2018-05-19 09:17:01 +02:00
switch ( dir ) {
case DMA_FROM_DEVICE :
break ;
case DMA_TO_DEVICE :
case DMA_BIDIRECTIONAL :
2018-05-28 09:55:35 +02:00
cache_op ( paddr , size , cpu_dma_wb_range ) ;
2018-05-19 09:17:01 +02:00
break ;
default :
BUG ( ) ;
}
2017-10-24 16:22:35 +08:00
}
2019-11-07 18:03:11 +01:00
void arch_sync_dma_for_cpu ( phys_addr_t paddr , size_t size ,
enum dma_data_direction dir )
2017-10-24 16:22:35 +08:00
{
2018-05-19 09:17:01 +02:00
switch ( dir ) {
case DMA_TO_DEVICE :
break ;
case DMA_FROM_DEVICE :
case DMA_BIDIRECTIONAL :
2018-05-28 09:55:35 +02:00
cache_op ( paddr , size , cpu_dma_inval_range ) ;
2018-05-19 09:17:01 +02:00
break ;
default :
BUG ( ) ;
}
}
2019-04-28 14:28:38 -05:00
void arch_dma_prep_coherent ( struct page * page , size_t size )
{
cache_op ( page_to_phys ( page ) , size , cpu_dma_wbinval_range ) ;
}