// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/acpi.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}
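
/*
 * Flush the instruction cache on every hart: flush the local hart, then
 * request a fence.i on the others, either through the SBI remote fence or by
 * broadcasting an IPI.
 */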
void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
		   !riscv_use_ipi_for_rfence()) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
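/*
 * Keep the icache coherent for a folio that is being mapped: if the folio has
 * not been flushed since it was last written (PG_dcache_clean is clear), flush
 * the icache and mark the folio clean so the global flush is not repeated for
 * every pte pointing at it.
 */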
void flush_icache_pte(pte_t pte)
{
	struct folio *folio = page_folio(pte_page(pte));

	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_icache_all();
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
#endif /* CONFIG_MMU */
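
/*
 * Cache-block sizes used by the Zicbom (cache-block management) and Zicboz
 * (cache-block zero) extensions, discovered once at boot by
 * riscv_init_cbo_blocksizes().
 */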
unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
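
/*
 * Read the named cache-block-size property from one CPU's devicetree node.
 * The first value seen wins; a hart that later reports a different value only
 * triggers a warning.
 */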
static void __init cbo_get_block_size(struct device_node *node,
				      const char *name, u32 *block_size,
				      unsigned long *first_hartid)
{
	unsigned long hartid;
	u32 val;

	if (riscv_of_processor_hartid(node, &hartid))
		return;

	if (of_property_read_u32(node, name, &val))
		return;

	if (!*block_size) {
		*block_size = val;
		*first_hartid = hartid;
	} else if (*block_size != val) {
		pr_warn("%s mismatched between harts %lu and %lu\n",
			name, *first_hartid, hartid);
	}
}
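
/*
 * Probe the CBOM/CBOZ block sizes at boot: from the devicetree CPU nodes when
 * ACPI is disabled, otherwise from the ACPI RHCT table.
 */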
void __init riscv_init_cbo_blocksizes(void)
{
	unsigned long cbom_hartid, cboz_hartid;
	u32 cbom_block_size = 0, cboz_block_size = 0;
	struct device_node *node;
	struct acpi_table_header *rhct;
	acpi_status status;

	if (acpi_disabled) {
		for_each_of_cpu_node(node) {
			/* set block-size for cbom and/or cboz extension if available */
			cbo_get_block_size(node, "riscv,cbom-block-size",
					   &cbom_block_size, &cbom_hartid);
			cbo_get_block_size(node, "riscv,cboz-block-size",
					   &cboz_block_size, &cboz_hartid);
		}
	} else {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;

		acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
		acpi_put_table((struct acpi_table_header *)rhct);
	}

	if (cbom_block_size)
		riscv_cbom_block_size = cbom_block_size;

	if (cboz_block_size)
		riscv_cboz_block_size = cboz_block_size;
}