// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>
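
/* IPI handler: flush the local hart's instruction cache. */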
static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}
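
/*
 * Flush the instruction cache on every hart.  When SBI support is built in,
 * the remote fences are delegated to the firmware in a single call;
 * otherwise each CPU is sent an IPI and flushes its own I$.
 */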
void flush_icache_all(void)
{
	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);

	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		cpumask_t hartid_mask;

		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
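
/*
 * For context: the deferred flush scheduled above is consumed the next time
 * a hart switches to this MM.  A minimal sketch of that counterpart, assuming
 * it runs from the context-switch path (the in-tree helper is
 * flush_icache_deferred() in arch/riscv/mm/context.c; details may differ):
 *
 *	static void flush_icache_deferred(struct mm_struct *mm)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *		cpumask_t *mask = &mm->context.icache_stale_mask;
 *
 *		if (cpumask_test_cpu(cpu, mask)) {
 *			cpumask_clear_cpu(cpu, mask);
 *			smp_mb();	// pairs with the smp_mb() in flush_icache_mm()
 *			local_flush_icache_all();
 *		}
 *	}
 */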

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
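
/*
 * PG_dcache_clean records whether the instruction caches have already been
 * made coherent with this page's contents, so the (global) icache flush is
 * only issued the first time the page is encountered here.
 */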
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}
#endif /* CONFIG_MMU */