// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
*/
#include <linux/cpu.h>
#include <linux/of.h>

#include <asm/cacheinfo.h>

static struct riscv_cacheinfo_ops *rv_cache_ops;

void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
{
	rv_cache_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
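
/*
 * A minimal usage sketch, with hypothetical names (foo_*): a platform
 * cache driver can attach extra sysfs attributes to its cache leaves by
 * registering a get_priv_group() callback at probe time:
 *
 *	static const struct attribute_group *
 *	foo_get_priv_group(struct cacheinfo *this_leaf)
 *	{
 *		// only decorate the leaf this driver owns
 *		return this_leaf->level == 2 ? &foo_attr_group : NULL;
 *	}
 *
 *	static struct riscv_cacheinfo_ops foo_cache_ops = {
 *		.get_priv_group = foo_get_priv_group,
 *	};
 *
 *	riscv_set_cacheinfo_ops(&foo_cache_ops);
 */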

const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
	if (rv_cache_ops && rv_cache_ops->get_priv_group)
		return rv_cache_ops->get_priv_group(this_leaf);
	return NULL;
}
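
/* Look up the cache leaf for a given (level, type) on the current hart. */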
static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
{
	/*
	 * Using raw_smp_processor_id() elides a preemptibility check, but this
	 * is really indicative of a larger problem: the cacheinfo UABI assumes
	 * that cores have a homogeneous view of the cache hierarchy. That
	 * happens to be the case for the current set of RISC-V systems, but
	 * likely won't be true in general. Since there's no way to provide
	 * correct information for these systems via the current UABI we're
	 * just eliding the check for now.
	 */
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
	struct cacheinfo *this_leaf;
	int index;

	for (index = 0; index < this_cpu_ci->num_leaves; index++) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level == level && this_leaf->type == type)
			return this_leaf;
	}

	return NULL;
}
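
/*
 * These helpers back the RISC-V ELF auxiliary vector entries
 * (AT_L1I_CACHESIZE, AT_L1I_CACHEGEOMETRY, ...) set up in asm/elf.h.
 * Geometry is packed as ways of associativity in the upper 16 bits and
 * coherency line size in bytes in the lower 16 bits.
 */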
uintptr_t get_cache_size(u32 level, enum cache_type type)
{
	struct cacheinfo *this_leaf = get_cacheinfo(level, type);

	return this_leaf ? this_leaf->size : 0;
}

uintptr_t get_cache_geometry(u32 level, enum cache_type type)
{
	struct cacheinfo *this_leaf = get_cacheinfo(level, type);

	return this_leaf ? (this_leaf->ways_of_associativity << 16 |
			    this_leaf->coherency_line_size) :
			   0;
}
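
/*
 * Only the level and type are recorded here; the generic cacheinfo core
 * (drivers/base/cacheinfo.c) fills in each leaf's size, ways and line
 * size from the same devicetree properties, so the arch code does not
 * duplicate that parsing. The node argument is currently unused.
 */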
static void ci_leaf_init(struct cacheinfo *this_leaf,
			 struct device_node *node,
			 enum cache_type type, unsigned int level)
{
	this_leaf->level = level;
	this_leaf->type = type;
}
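
/*
 * Build this CPU's list of cache leaves from the devicetree: L1 leaves
 * are described on the CPU node itself, outer levels via the
 * next-level-cache chain.
 */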
int populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct device_node *np = of_cpu_device_node_get(cpu);
	struct device_node *prev = NULL;
	int levels = 1, level = 1;

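	/* L1 on the CPU node: either a unified cache or split I/D caches. */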
	if (of_property_read_bool(np, "cache-size"))
		ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
	if (of_property_read_bool(np, "i-cache-size"))
		ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
	if (of_property_read_bool(np, "d-cache-size"))
		ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);

	prev = np;
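	/* Follow the next-level-cache chain to pick up outer cache levels. */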
	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);
		prev = np;

		if (!of_device_is_compatible(np, "cache"))
			break;
		if (of_property_read_u32(np, "cache-level", &level))
			break;
		if (level <= levels)
			break;

		if (of_property_read_bool(np, "cache-size"))
			ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
		if (of_property_read_bool(np, "i-cache-size"))
			ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
		if (of_property_read_bool(np, "d-cache-size"))
			ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
		levels = level;
	}
	/*
	 * On a normal loop exit np is NULL and prev still holds the last
	 * reference taken; on an early break prev == np, so dropping prev
	 * is correct in both cases.
	 */
	of_node_put(prev);

	return 0;
}