// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>
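
/*
 * Make the I-cache and D-cache coherent for a kernel alias of a user page.
 * With an aliasing (VIPT) I-cache, the D-cache is cleaned to the PoU and the
 * entire I-cache is invalidated; a non-aliasing I-cache only needs
 * maintenance over the affected virtual address range.
 */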
void sync_icache_aliases(void *kaddr, unsigned long len)
{
	unsigned long addr = (unsigned long)kaddr;

	if (icache_is_aliasing()) {
		__clean_dcache_area_pou(kaddr, len);
		__flush_icache_all();
	} else {
		/*
		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
		 * for user mappings.
		 */
		__flush_icache_range(addr, addr + len);
	}
}
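
/*
 * Called when another task's page has been written through the kernel alias
 * (see copy_to_user_page() below). Only executable mappings need the extra
 * I-cache maintenance; plain data writes are left alone.
 */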
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
	if (vma->vm_flags & VM_EXEC)
		sync_icache_aliases(kaddr, len);
}

/*
 * Copy user data from/to a page which is mapped into a different process's
 * address space.  Really, we want to allow our "user space" model to handle
 * this.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
}
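
/*
 * Called from set_pte_at() when a user-executable mapping is installed.
 * PG_dcache_clean avoids repeating the work: it is cleared by
 * flush_dcache_page() when the kernel writes to the page, and set again here
 * once the aliases have been brought back in sync.
 */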
void __sync_icache_dcache(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		sync_icache_aliases(page_address(page), page_size(page));
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);

/*
 * This function is called when a page has been modified by the kernel. Mark
 * it as dirty for later flushing when mapped in user space (if executable,
 * see __sync_icache_dcache).
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(__flush_icache_range);
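
/*
 * The low-level maintenance routines used in this file (__flush_icache_range,
 * __clean_dcache_area_pou, __clean_dcache_area_pop, __inval_dcache_area) are
 * the assembly helpers from arch/arm64/mm/cache.S.
 */
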
#ifdef CONFIG_ARCH_HAS_PMEM_API
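/*
 * Write the given region back for persistent memory: clean the D-cache to the
 * Point of Persistence (DC CVAP where available, otherwise a clean to the PoC)
 * so the data reaches the pmem media.
 */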
void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* Ensure order against any prior non-cacheable writes */
	dmb(osh);
	__clean_dcache_area_pop(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

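/*
 * Invalidate the given region so that subsequent reads come from the
 * persistent medium rather than from stale cached copies.
 */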
void arch_invalidate_pmem(void *addr, size_t size)
{
	__inval_dcache_area(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif