/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balancing. A per cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during load balancing, except for idle cores. One idle core is selected
 * to run rebalance_domains for all idle cores, and the cpu_capacity can be
 * updated during this sequence.
 */

#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3 * SCHED_CAPACITY_SCALE / 2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
	{ "arm,cortex-a15", 3891 },
	{ "arm,cortex-a7",  2048 },
	{ NULL, },
};
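
/*
 * Illustrative note (the figures here are examples, not from the original
 * source): these values appear to encode per-clock relative throughput,
 * so at equal frequency a Cortex-A15 is rated 3891/2048, i.e. roughly
 * 1.9x a Cortex-A7.
 */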

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)	__cpu_capacity[cpu]

static unsigned long middle_capacity = 1;
static bool cap_from_dt = true;

/*
 * Iterate over all CPUs' descriptors in the DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency, as close
 * as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_capacity field such that an
 * 'average' CPU is of middle capacity. Also see the comments near
 * table_efficiency[] and update_cpu_capacity().
 */
static void __init parse_dt_topology(void)
{
	const struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL;
	unsigned long min_capacity = ULONG_MAX;
	unsigned long max_capacity = 0;
	unsigned long capacity = 0;
	int cpu = 0;

	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
				 GFP_NOWAIT);

	for_each_possible_cpu(cpu) {
		const __be32 *rate;
		int len;

		/* too early to use cpu->of_node */
		cn = of_get_cpu_node(cpu, NULL);
		if (!cn) {
			pr_err("missing device node for CPU %d\n", cpu);
			continue;
		}
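
		/*
		 * Prefer an explicit capacity from the DT when one is given
		 * (the arch_topology helper parses the "capacity-dmips-mhz"
		 * cpu node property); fall back to the efficiency table
		 * below only when it is absent.
		 */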
		if (topology_parse_cpu_capacity(cn, cpu)) {
			of_node_put(cn);
			continue;
		}

		cap_from_dt = false;

		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
			if (of_device_is_compatible(cn, cpu_eff->compatible))
				break;

		if (cpu_eff->compatible == NULL)
			continue;

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%pOF missing clock-frequency property\n", cn);
			continue;
		}
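
		/*
		 * clock-frequency is in Hz; shifting right by 20 divides by
		 * 2^20 (~10^6), giving roughly the frequency in MHz, so the
		 * capacity below is ~MHz * efficiency. Illustrative example
		 * (not from the original source): a 1 GHz Cortex-A15 gives
		 * (1000000000 >> 20) * 3891 = 953 * 3891 = 3708123.
		 */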
		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

		/* Save min capacity of the system */
		if (capacity < min_capacity)
			min_capacity = capacity;

		/* Save max capacity of the system */
		if (capacity > max_capacity)
			max_capacity = capacity;

		cpu_capacity(cpu) = capacity;
	}

	/* If the min and max capacities are equal, we bypass the update of
	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
	 * compute a middle_capacity factor that will ensure that the capacity
	 * of an 'average' CPU of the system will be as close as possible to
	 * SCHED_CAPACITY_SCALE, which is the default value, but with the
	 * constraint explained near table_efficiency[].
	 */
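	/*
	 * Worked example with illustrative numbers (not from the original
	 * source): with an A15 at capacity 3708123 and an A7 at 1951744
	 * (both at 1 GHz, per the example above), 4 * max = 14832492 is
	 * less than 3 * (max + min) = 16979601, so middle_capacity =
	 * 5659867 >> (SCHED_CAPACITY_SHIFT + 1 = 11) = 2763.
	 * update_cpu_capacity() then rates the A15 at 3708123 / 2763 = 1342
	 * and the A7 at 1951744 / 2763 = 706; their average is exactly
	 * SCHED_CAPACITY_SCALE (1024).
	 */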
	if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_CAPACITY_SHIFT + 1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_CAPACITY_SHIFT - 1)) + 1;

	if (cap_from_dt)
		topology_normalize_cpu_scale();
}

/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for a heterogeneous system
 * but the function returns directly for an SMP system.
 */
static void update_cpu_capacity(unsigned int cpu)
{
	if (!cpu_capacity(cpu) || cap_from_dt)
		return;

	topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);

	pr_info("CPU%u: update cpu_capacity %lu\n",
		cpu, topology_get_cpu_scale(cpu));
}

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by DT bindings once available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
	return &cpu_topology[cpu].thread_sibling;
}
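
/*
 * On ARMv7 systems without hardware threads (the common case),
 * thread_sibling contains only the CPU itself, so each core forms its
 * own power domain group, consistent with the assumption above.
 */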

/*
 * store_cpu_topology is called at boot when only one CPU is running, and
 * with the mutex cpu_hotplug.lock held once several CPUs have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();
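
	/*
	 * For reference, the ARMv7 MPIDR layout: Aff0 is bits [7:0],
	 * Aff1 bits [15:8], Aff2 bits [23:16], the MT flag is bit 24 and
	 * the U (uniprocessor) flag is bit 30. Illustrative example: on a
	 * non-MT system, mpidr == 0x80000102 decodes below to core_id 2
	 * (Aff0) and package_id 1 (Aff1).
	 */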

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system:
		 * the multiprocessor format & multiprocessor mode fields are set
		 */

		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		}
	} else {
		/*
		 * This is a uniprocessor system:
		 * either we are in multiprocessor format but on a
		 * uniprocessor system, or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->package_id = -1;
	}

	update_cpu_capacity(cpuid);

	pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].package_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

static inline int cpu_corepower_flags(void)
{
	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
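
/*
 * A brief gloss (not from the original source): GMC groups cores that
 * share a power domain (per cpu_corepower_mask above), MC groups cores
 * of a cluster that share package resources, and DIE spans all CPUs.
 * The scheduler builds its domains from these levels, smallest first.
 */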

/*
 * init_cpu_topology is called at boot when only one CPU is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
	reset_cpu_topology();
	smp_wmb();

	parse_dt_topology();

	/* Set scheduler topology descriptor */
	set_sched_topology(arm_topology);
}