// SPDX-License-Identifier: GPL-2.0-only
/*
 * MIPS cacheinfo support
 */
#include <linux/cacheinfo.h>

/* Populates leaf and increments to next leaf */
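/*
 * Note: this expands at the call site and relies on the local variable
 * 'c' (struct cpuinfo_mips *) being in scope there; 'leaf' is advanced
 * as a side effect so successive invocations fill consecutive leaves.
 */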
#define populate_cache(cache, leaf, c_level, c_type)		\
do {								\
	leaf->type = c_type;					\
	leaf->level = c_level;					\
	leaf->coherency_line_size = c->cache.linesz;		\
	leaf->number_of_sets = c->cache.sets;			\
	leaf->ways_of_associativity = c->cache.ways;		\
	leaf->size = c->cache.linesz * c->cache.sets *		\
		c->cache.ways;					\
	leaf++;							\
} while (0)
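
/*
 * Count the cache levels and leaves present on this CPU so that the
 * generic cacheinfo core can allocate a matching info_list.
 */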
int init_cache_level(unsigned int cpu)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	int levels = 0, leaves = 0;

	/*
	 * If Dcache is not set, we assume the cache structures
	 * are not properly initialized.
	 */
	if (c->dcache.waysize)
		levels += 1;
	else
		return -ENOENT;
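
	/* A split L1 contributes an I and a D leaf; a unified L1 just one. */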
	leaves += (c->icache.waysize) ? 2 : 1;

	if (c->vcache.waysize) {
		levels++;
		leaves++;
	}

	if (c->scache.waysize) {
		levels++;
		leaves++;
	}

	if (c->tcache.waysize) {
		levels++;
		leaves++;
	}

	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}
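
/*
 * Mark every possible CPU that is a sibling of @cpu (i.e. shares its
 * core); per-core caches are shared by all of them.
 */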
static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
{
	int cpu1;

	for_each_possible_cpu(cpu1)
		if (cpus_are_siblings(cpu, cpu1))
			cpumask_set_cpu(cpu1, cpu_map);
}
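
/*
 * Mark every possible CPU in the same cluster as @cpu; caches tagged
 * per cluster (the Scache below) are shared cluster-wide.
 */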
static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
{
	int cpu1;
	int cluster = cpu_cluster(&cpu_data[cpu]);

	for_each_possible_cpu(cpu1)
		if (cpu_cluster(&cpu_data[cpu1]) == cluster)
			cpumask_set_cpu(cpu1, cpu_map);
}
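
/*
 * Fill in one struct cacheinfo leaf per cache counted by
 * init_cache_level(), in ascending level order, and record which CPUs
 * share each leaf.
 */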
int populate_cache_leaves(unsigned int cpu)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	int level = 1;
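
	/*
	 * shared_cpu_map must be set on each leaf before populate_cache()
	 * advances this_leaf to the next free slot.
	 */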
	if (c->icache.waysize) {
		/* I/D caches are per core */
		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
		populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
		level++;
	} else {
		populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
		level++;
	}

	if (c->vcache.waysize) {
		/* Vcache is per core as well */
		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
		populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
		level++;
	}

	if (c->scache.waysize) {
		/* Scache is per cluster */
		fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
		populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
		level++;
	}

	if (c->tcache.waysize)
		populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);

	this_cpu_ci->cpu_map_populated = true;

	return 0;
}
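
/*
 * Example: with these two callbacks wired up, the generic cacheinfo
 * core exposes each leaf under sysfs. Assuming a CPU with a split L1
 * and a unified L2, one would expect something like:
 *
 *   $ cat /sys/devices/system/cpu/cpu0/cache/index2/level
 *   2
 *   $ cat /sys/devices/system/cpu/cpu0/cache/index2/type
 *   Unified
 */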