// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>

DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
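
/*
 * arch_set_freq_scale() refreshes the per-CPU frequency-invariance factor:
 * freq_scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq, i.e. the
 * current frequency of the CPUs in @cpus relative to their maximum,
 * expressed against SCHED_CAPACITY_SCALE.
 */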
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
			 unsigned long max_freq)
{
	unsigned long scale;
	int i;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;
}

static DEFINE_MUTEX(cpu_scale_mutex);
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}
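
/*
 * "cpu_capacity" sysfs attribute: reads return the current capacity of the
 * CPU; writes update the capacity of the CPU and all of its core siblings,
 * serialized by cpu_scale_mutex.
 */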
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
}

static ssize_t cpu_capacity_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int this_cpu = cpu->dev.id;
	int i;
	unsigned long new_capacity;
	ssize_t ret;

	if (!count)
		return 0;

	ret = kstrtoul(buf, 0, &new_capacity);
	if (ret)
		return ret;
	if (new_capacity > SCHED_CAPACITY_SCALE)
		return -EINVAL;

	mutex_lock(&cpu_scale_mutex);
	for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
		topology_set_cpu_scale(i, new_capacity);
	mutex_unlock(&cpu_scale_mutex);

	return count;
}

static DEVICE_ATTR_RW(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
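
/*
 * Raw capacity values parsed from the device tree ("capacity-dmips-mhz"),
 * kept around until they can be normalized; capacity_scale tracks the
 * largest raw value seen so far and is used as the normalization reference.
 */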
static u32 capacity_scale;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}
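
/*
 * Normalize the raw capacities so that the biggest CPU ends up at
 * SCHED_CAPACITY_SCALE (1024): each value is scaled as
 * (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT) / capacity_scale.
 */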
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	mutex_lock(&cpu_scale_mutex);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(NULL, cpu));
	}
	mutex_unlock(&cpu_scale_mutex);
}
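
/*
 * Parse the "capacity-dmips-mhz" property of a CPU device-tree node. The
 * raw_capacity array is allocated on first use; if the property is missing
 * on any CPU, parsing is abandoned and every CPU falls back to the default
 * capacity of 1024.
 */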
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
				cap_parsing_failed = true;
				return false;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
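
/*
 * cpufreq policy notifier: as each policy comes up, scale the capacity of
 * its CPUs by the policy's maximum frequency (cpuinfo.max_freq / 1000).
 * Once every possible CPU has been visited, normalize the result and
 * schedule the notifier for removal.
 */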
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_NOTIFY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
				    policy->cpuinfo.max_freq / 1000UL;
		capacity_scale = max(raw_capacity[cpu], capacity_scale);
	}

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * on ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
		return -ENOMEM;
	}

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);
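
/*
 * Runs from a workqueue once all policies have been seen; presumably done
 * this way so the notifier is not unregistered from within its own
 * callback.
 */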
static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif