/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>

#include <asm/cputype.h>
#include <asm/topology.h>

static int __init get_cpu_for_node(struct device_node *node)
{
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
                return -1;

        for_each_possible_cpu(cpu) {
                if (of_get_cpu_node(cpu, NULL) == cpu_node) {
                        of_node_put(cpu_node);
                        return cpu;
                }
        }

        pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

        of_node_put(cpu_node);
        return -1;
}
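
/*
 * The parsers below walk a cpu-map tree of roughly the following shape.
 * This fragment is an assumed, illustrative example rather than a real
 * dts; the cpuN labels and phandles are hypothetical:
 *
 *      cpus {
 *              cpu-map {
 *                      cluster0 {
 *                              core0 { cpu = <&cpu0>; };
 *                              core1 { cpu = <&cpu1>; };
 *                      };
 *                      cluster1 {
 *                              core0 {
 *                                      thread0 { cpu = <&cpu2>; };
 *                                      thread1 { cpu = <&cpu3>; };
 *                              };
 *                      };
 *              };
 *      };
 *
 * Nested clusterN subnodes are also accepted; parse_cluster() recurses
 * into them and flattens the result into a single cluster_id space.
 */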

static int __init parse_core(struct device_node *core, int cluster_id,
                             int core_id)
{
        char name[10];
        bool leaf = true;
        int i = 0;
        int cpu;
        struct device_node *t;

        do {
                snprintf(name, sizeof(name), "thread%d", i);
                t = of_get_child_by_name(core, name);
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
                        if (cpu >= 0) {
                                cpu_topology[cpu].cluster_id = cluster_id;
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else {
                                pr_err("%s: Can't get CPU for thread\n",
                                       t->full_name);
                                of_node_put(t);
                                return -EINVAL;
                        }
                        of_node_put(t);
                }
                i++;
        } while (t);

        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
                        pr_err("%s: Core has both threads and CPU\n",
                               core->full_name);
                        return -EINVAL;
                }

                cpu_topology[cpu].cluster_id = cluster_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf) {
                pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
                return -EINVAL;
        }

        return 0;
}

static int __init parse_cluster(struct device_node *cluster, int depth)
{
        char name[10];
        bool leaf = true;
        bool has_cores = false;
        struct device_node *c;
        static int cluster_id __initdata;
        int core_id = 0;
        int i, ret;

        /*
         * First check for child clusters; we currently ignore any
         * information about the nesting of clusters and present the
         * scheduler with a flat list of them.
         */
        i = 0;
        do {
                snprintf(name, sizeof(name), "cluster%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        leaf = false;
                        ret = parse_cluster(c, depth + 1);
                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        /* Now check for cores */
        i = 0;
        do {
                snprintf(name, sizeof(name), "core%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        has_cores = true;

                        if (depth == 0) {
                                pr_err("%s: cpu-map children should be clusters\n",
                                       c->full_name);
                                of_node_put(c);
                                return -EINVAL;
                        }

                        if (leaf) {
                                ret = parse_core(c, cluster_id, core_id++);
                        } else {
                                pr_err("%s: Non-leaf cluster with core %s\n",
                                       cluster->full_name, name);
                                ret = -EINVAL;
                        }

                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        if (leaf && !has_cores)
                pr_warn("%s: empty cluster\n", cluster->full_name);

        if (leaf)
                cluster_id++;

        return 0;
}

static int __init parse_dt_topology(void)
{
        struct device_node *cn, *map;
        int ret = 0;
        int cpu;

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return 0;
        }

        /*
         * When topology is provided cpu-map is essentially a root
         * cluster with restricted subnodes.
         */
        map = of_get_child_by_name(cn, "cpu-map");
        if (!map)
                goto out;

        ret = parse_cluster(map, 0);
        if (ret != 0)
                goto out_map;

        /*
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
        for_each_possible_cpu(cpu)
                if (cpu_topology[cpu].cluster_id == -1)
                        ret = -EINVAL;

out_map:
        of_node_put(map);
out:
        of_node_put(cn);
        return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

static void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}
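
/*
 * Worked example for update_siblings_masks() above, on an assumed
 * four-CPU system with two clusters of two single-threaded cores each
 * (cluster_id/core_id/thread_id of 0/0/-1, 0/1/-1, 1/0/-1 and 1/1/-1):
 * once all CPUs have booted, core_sibling is {0,1} for CPU0 and CPU1
 * and {2,3} for CPU2 and CPU3, while thread_sibling only ever contains
 * the CPU itself because no core reports threads.
 */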

void store_cpu_topology(unsigned int cpuid)
{
        struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
        u64 mpidr;

        if (cpuid_topo->cluster_id != -1)
                goto topology_populated;

        mpidr = read_cpuid_mpidr();

        /* Uniprocessor systems can rely on default topology values */
        if (mpidr & MPIDR_UP_BITMASK)
                return;

        /* Create cpu topology mapping based on MPIDR. */
        if (mpidr & MPIDR_MT_BITMASK) {
                /* Multiprocessor system : Multi-threads per core */
                cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
                                         MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
        } else {
                /* Multiprocessor system : Single-thread per core */
                cpuid_topo->thread_id = -1;
                cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
                                         MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
                                         MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
        }

        pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
                 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
                 cpuid_topo->thread_id, mpidr);

topology_populated:
        update_siblings_masks(cpuid);
}
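
/*
 * Illustrative decode for store_cpu_topology() above, using assumed
 * (not real-hardware) MPIDR values. With mpidr = 0x0000000000000101
 * the MT and UP bits are clear, so thread_id = -1, core_id = Aff0 = 1
 * and cluster_id = Aff1 | Aff2 << 8 | Aff3 << 16 = 1. With the MT bit
 * set, e.g. mpidr = 0x0000000001000102, the levels shift up one step:
 * thread_id = Aff0 = 2, core_id = Aff1 = 1 and
 * cluster_id = Aff2 | Aff3 << 8 = 0.
 */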

static void __init reset_cpu_topology(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = 0;
                cpu_topo->cluster_id = -1;

                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
        }
}

void __init init_cpu_topology(void)
{
        reset_cpu_topology();

        /*
         * Discard anything that was parsed if we hit an error so we
         * don't use partial information.
         */
        if (of_have_populated_dt() && parse_dt_topology())
                reset_cpu_topology();
}