// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */

#include <linux/dma-noncoherent.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>
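
/*
 * OpenRISC has no hardware DMA cache coherency: "consistent" DMA memory
 * is produced by setting the cache-inhibit bit (_PAGE_CI) on every PTE
 * backing a buffer in the kernel page tables and flushing the affected
 * lines out of the data cache.
 */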
static int
page_set_nocache(pte_t *pte, unsigned long addr,
                 unsigned long next, struct mm_walk *walk)
{
        unsigned long cl;
        struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

        pte_val(*pte) |= _PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        /* Flush page out of dcache */
        for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
                mtspr(SPR_DCBFR, cl);

        return 0;
}
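
/* Have walk_page_range() invoke page_set_nocache() on each PTE in the range. */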
static const struct mm_walk_ops set_nocache_walk_ops = {
        .pte_entry = page_set_nocache,
};

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
                   unsigned long next, struct mm_walk *walk)
{
        pte_val(*pte) &= ~_PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
        .pte_entry = page_clear_nocache,
};
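
/*
 * Called by the generic dma-direct code when a consistent buffer is
 * allocated: marks the region uncached and returns cpu_addr, or an
 * ERR_PTR() value if the page-table walk fails.
 */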
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
        unsigned long va = (unsigned long)cpu_addr;
        int error;

        /*
         * We need to iterate through the pages, clearing the dcache for
         * them and setting the cache-inhibit bit.
         */
        mmap_read_lock(&init_mm);
        error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
                        NULL);
        mmap_read_unlock(&init_mm);

        if (error)
                return ERR_PTR(error);
        return cpu_addr;
}
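
/*
 * Inverse of arch_dma_set_uncached(), called before the buffer is freed:
 * clears the cache-inhibit bit on each PTE in the region.
 */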
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
        unsigned long va = (unsigned long)cpu_addr;

        mmap_read_lock(&init_mm);
        /* walk_page_range shouldn't be able to fail here */
        WARN_ON(walk_page_range(&init_mm, va, va + size,
                        &clear_nocache_walk_ops, NULL));
        mmap_read_unlock(&init_mm);
}
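
/*
 * Streaming-DMA sync, called when a buffer is handed to a device:
 * DMA_TO_DEVICE writes dirty lines back so the device sees the CPU's
 * data; DMA_FROM_DEVICE invalidates the lines so later CPU reads fetch
 * what the device wrote to memory.
 */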
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        unsigned long cl;
        struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

        switch (dir) {
        case DMA_TO_DEVICE:
                /* Flush the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo->dcache_block_size)
                        mtspr(SPR_DCBFR, cl);
                break;
        case DMA_FROM_DEVICE:
                /* Invalidate the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo->dcache_block_size)
                        mtspr(SPR_DCBIR, cl);
                break;
        default:
                /*
                 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
                 * flush nor invalidate the cache here as the area will need
                 * to be manually synced anyway.
                 */
                break;
        }
}
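
/*
 * Usage sketch (illustrative only, not part of this file): drivers never
 * call the hooks above directly; the generic DMA API does.  A coherent
 * allocation in a hypothetical driver would look like:
 *
 *	dma_addr_t dma_handle;
 *	void *vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle,
 *					 GFP_KERNEL);
 *
 * On OpenRISC the allocator then invokes arch_dma_set_uncached() on the
 * returned region, and arch_dma_clear_uncached() when the buffer is
 * freed with dma_free_coherent().
 */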