/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

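/*
 * Write back and invalidate the data cache lines covering [start, end),
 * one cpuinfo.dcache_line_size line at a time using the Nios II "flushd"
 * instruction.  The range is clamped to one cache's worth of lines, since
 * flushing more than cpuinfo.dcache_size bytes is redundant.
 */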
static void __flush_dcache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.dcache_line_size - 1);
        end += (cpuinfo.dcache_line_size - 1);
        end &= ~(cpuinfo.dcache_line_size - 1);

        if (end > start + cpuinfo.dcache_size)
                end = start + cpuinfo.dcache_size;

        for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
                __asm__ __volatile__ ("   flushd 0(%0)\n"
                                        : /* Outputs */
                                        : /* Inputs  */ "r"(addr)
                                        /* : No clobber */);
        }
}

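/*
 * Discard the data cache lines covering [start, end) with "initda"
 * (invalidate by address) without writing dirty data back to memory.
 * Callers must only pass ranges whose cached contents they can afford
 * to lose.
 */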
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.dcache_line_size - 1);
        end += (cpuinfo.dcache_line_size - 1);
        end &= ~(cpuinfo.dcache_line_size - 1);

        for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
                __asm__ __volatile__ ("   initda 0(%0)\n"
                                        : /* Outputs */
                                        : /* Inputs  */ "r"(addr)
                                        /* : No clobber */);
        }
}

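/*
 * Invalidate the instruction cache lines covering [start, end) with
 * "flushi", then drain the pipeline with "flushp" so that no stale,
 * already-fetched instructions remain in flight.
 */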
static void __flush_icache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.icache_line_size - 1);
        end += (cpuinfo.icache_line_size - 1);
        end &= ~(cpuinfo.icache_line_size - 1);

        if (end > start + cpuinfo.icache_size)
                end = start + cpuinfo.icache_size;

        for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
                __asm__ __volatile__ ("   flushi %0\n"
                                        : /* Outputs */
                                        : /* Inputs  */ "r"(addr)
                                        /* : No clobber */);
        }
        __asm__ __volatile(" flushp\n");
}

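/*
 * Flush the user-space aliases of @folio: walk the shared (VM_MAYSHARE)
 * VMAs of the owning address_space that overlap the folio's page range
 * in the current mm and flush the corresponding virtual ranges, so that
 * aliased cache lines do not retain stale data.
 */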
static void flush_aliases(struct address_space *mapping, struct folio *folio)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *vma;
        unsigned long flags;
        pgoff_t pgoff;
        unsigned long nr = folio_nr_pages(folio);

        pgoff = folio->index;

        flush_dcache_mmap_lock_irqsave(mapping, flags);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
                unsigned long start;

                if (vma->vm_mm != mm)
                        continue;
                if (!(vma->vm_flags & VM_MAYSHARE))
                        continue;

                start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                flush_cache_range(vma, start, start + nr * PAGE_SIZE);
        }
        flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}

void flush_cache_all(void)
{
        __flush_dcache(0, cpuinfo.dcache_size);
        __flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

void flush_icache_range(unsigned long start, unsigned long end)
{
        __flush_dcache(start, end);
        __flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
        __flush_dcache(start, end);
        __flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
        __invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
{
        __flush_dcache(start, end);
        if (vma == NULL || (vma->vm_flags & VM_EXEC))
                __flush_icache(start, end);
}

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
                        unsigned int nr)
{
        unsigned long start = (unsigned long) page_address(page);
        unsigned long end = start + nr * PAGE_SIZE;

        __flush_dcache(start, end);
        __flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                        unsigned long pfn)
{
        unsigned long start = vmaddr;
        unsigned long end = start + PAGE_SIZE;

        __flush_dcache(start, end);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache(start, end);
}

static void __flush_dcache_folio(struct folio *folio)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        unsigned long start = (unsigned long)folio_address(folio);

        __flush_dcache(start, start + folio_size(folio));
}

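/*
 * flush_dcache_folio() keeps the kernel and user views of a page-cache
 * folio coherent.  If the folio belongs to a mapping that is not yet
 * mapped into user space, the flush is deferred by clearing
 * PG_dcache_clean; update_mmu_cache_range() will perform it once the
 * first PTE is installed.  Otherwise the kernel mapping is flushed now,
 * any user aliases are flushed, and PG_dcache_clean is set.
 */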
void flush_dcache_folio(struct folio *folio)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (is_zero_pfn(folio_pfn(folio)))
                return;

        mapping = folio_flush_mapping(folio);

        /* Flush this page if there are aliases. */
        if (mapping && !mapping_mapped(mapping)) {
                clear_bit(PG_dcache_clean, &folio->flags);
        } else {
                __flush_dcache_folio(folio);
                if (mapping) {
                        unsigned long start = (unsigned long)folio_address(folio);
                        flush_aliases(mapping, folio);
                        flush_icache_range(start, start + folio_size(folio));
                }
                set_bit(PG_dcache_clean, &folio->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
        flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

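/*
 * Called when PTEs covering @address are (re)installed.  Reload the TLB
 * entry, perform any data-cache flush deferred by flush_dcache_folio()
 * (tracked via PG_dcache_clean), then flush user aliases and, for
 * executable mappings, the instruction cache.
 */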
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
                unsigned long address, pte_t *ptep, unsigned int nr)
{
        pte_t pte = *ptep;
        unsigned long pfn = pte_pfn(pte);
        struct folio *folio;
        struct address_space *mapping;

        reload_tlb_page(vma, address, pte);

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (is_zero_pfn(pfn))
                return;

        folio = page_folio(pfn_to_page(pfn));
        if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
                __flush_dcache_folio(folio);

        mapping = folio_flush_mapping(folio);
        if (mapping) {
                flush_aliases(mapping, folio);
                if (vma->vm_flags & VM_EXEC)
                        flush_icache_pages(vma, &folio->page,
                                           folio_nr_pages(folio));
        }
}

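/*
 * copy_user_page() and clear_user_page() flush the user-space alias at
 * @vaddr before the operation and the kernel destination mapping after
 * it, so that neither view of the page is left with stale cache lines.
 */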
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *to)
{
        __flush_dcache(vaddr, vaddr + PAGE_SIZE);
        __flush_icache(vaddr, vaddr + PAGE_SIZE);
        copy_page(vto, vfrom);
        __flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
        __flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
        __flush_dcache(vaddr, vaddr + PAGE_SIZE);
        __flush_icache(vaddr, vaddr + PAGE_SIZE);
        clear_page(addr);
        __flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
        __flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long user_vaddr,
                         void *dst, void *src, int len)
{
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        __flush_dcache((unsigned long)src, (unsigned long)src + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)src, (unsigned long)src + len);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long user_vaddr,
                       void *dst, void *src, int len)
{
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)dst, (unsigned long)dst + len);
}