// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
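
/*
 * Called by generic MM code after a PTE for @address is set up. On the
 * first use of a page whose caches may be stale, write back its D-cache
 * lines and, for executable mappings, invalidate the matching I-cache
 * lines so the pipeline cannot fetch stale instructions.
 */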
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page;

	page = pfn_to_page(pte_pfn(*pte));
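
	/* The shared zero page is read-only, so its caches are never stale. */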
	if (page == ZERO_PAGE(0))
		return;
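
	/*
	 * If PG_dcache_clean was already set, the page has been synced since
	 * it was last dirtied; only the first use needs the flush below.
	 */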
	if (test_and_set_bit(PG_dcache_clean, &page->flags))
		return;
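
	/* kmap the page (it may live in highmem) to get a kernel VA to flush. */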
	addr = (unsigned long) kmap_atomic(page);

	dcache_wb_range(addr, addr + PAGE_SIZE);

	if (vma->vm_flags & VM_EXEC)
		icache_inv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}
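
/*
 * Lazily invalidate this CPU's I-cache if a cross-CPU flush for @mm
 * could not be delivered here and left this CPU marked stale. Presumably
 * invoked when switching to @mm (e.g. from the arch switch_mm() path).
 */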
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);

		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();

		local_icache_inv_all(NULL);
	}
}
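
/*
 * Invalidate the I-cache for [start, end) in @mm on every CPU that might
 * run it. CPUs not reached synchronously here remain marked in
 * icache_stale_mask and catch up lazily in flush_icache_deferred().
 */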
void flush_icache_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

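	/*
	 * Fast path: when the CPU can invalidate I-cache lines by VA, a
	 * thread flushing its own mm invalidates just [start, end) locally
	 * and skips the cross-CPU broadcast.
	 */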
#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them
	 * as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}