// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Linaro Limited
 *
 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
 *
 * The DTPM CPU is based on the energy model. It hooks the CPU in the
 * DTPM tree which in turn updates the power number by propagating the
 * power number from the CPU energy model information to the parents.
 *
 * The association between the power and the performance state allows
 * setting the power of the CPU at the OPP granularity.
 *
 * CPU hotplug is supported and the power numbers will be updated
 * if a CPU is hot plugged / unplugged.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpuhotplug.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>
/*
 * Per-performance-domain DTPM node. A single instance is shared by all
 * CPUs of a cpufreq policy (every CPU of policy->related_cpus points at
 * it through the dtpm_per_cpu per-cpu variable).
 */
struct dtpm_cpu {
	struct dtpm dtpm;			/* node embedded in the DTPM tree */
	struct freq_qos_request qos_req;	/* max-frequency request used to cap power */
	int cpu;				/* one CPU of the performance domain */
};
/* Maps each CPU to the dtpm_cpu instance of its performance domain. */
static DEFINE_PER_CPU(struct dtpm_cpu *, dtpm_per_cpu);

/* Return the dtpm_cpu wrapper embedding the given DTPM node. */
static struct dtpm_cpu *to_dtpm_cpu(struct dtpm *dtpm)
{
	return container_of(dtpm, struct dtpm_cpu, dtpm);
}
2020-12-08 19:41:45 +03:00
static u64 set_pd_power_limit ( struct dtpm * dtpm , u64 power_limit )
{
2021-03-12 16:04:10 +03:00
struct dtpm_cpu * dtpm_cpu = to_dtpm_cpu ( dtpm ) ;
2021-03-12 16:04:07 +03:00
struct em_perf_domain * pd = em_cpu_get ( dtpm_cpu - > cpu ) ;
2020-12-08 19:41:45 +03:00
struct cpumask cpus ;
unsigned long freq ;
u64 power ;
int i , nr_cpus ;
cpumask_and ( & cpus , cpu_online_mask , to_cpumask ( pd - > cpus ) ) ;
nr_cpus = cpumask_weight ( & cpus ) ;
for ( i = 0 ; i < pd - > nr_perf_states ; i + + ) {
2022-07-07 10:15:52 +03:00
power = pd - > table [ i ] . power * nr_cpus ;
2020-12-08 19:41:45 +03:00
if ( power > power_limit )
break ;
}
freq = pd - > table [ i - 1 ] . frequency ;
freq_qos_update_request ( & dtpm_cpu - > qos_req , freq ) ;
2022-07-07 10:15:52 +03:00
power_limit = pd - > table [ i - 1 ] . power * nr_cpus ;
2020-12-08 19:41:45 +03:00
return power_limit ;
}
2021-03-12 16:04:11 +03:00
static u64 scale_pd_power_uw ( struct cpumask * pd_mask , u64 power )
{
2022-06-21 12:04:10 +03:00
unsigned long max , sum_util = 0 ;
2021-03-12 16:04:11 +03:00
int cpu ;
/*
2022-06-21 12:04:10 +03:00
* The capacity is the same for all CPUs belonging to
* the same perf domain .
2021-03-12 16:04:11 +03:00
*/
2022-06-21 12:04:10 +03:00
max = arch_scale_cpu_capacity ( cpumask_first ( pd_mask ) ) ;
for_each_cpu_and ( cpu , pd_mask , cpu_online_mask )
sum_util + = sched_cpu_util ( cpu ) ;
return ( power * ( ( sum_util < < 10 ) / max ) ) > > 10 ;
2021-03-12 16:04:11 +03:00
}
2020-12-08 19:41:45 +03:00
static u64 get_pd_power_uw ( struct dtpm * dtpm )
{
2021-03-12 16:04:10 +03:00
struct dtpm_cpu * dtpm_cpu = to_dtpm_cpu ( dtpm ) ;
2020-12-08 19:41:45 +03:00
struct em_perf_domain * pd ;
2021-03-12 16:04:11 +03:00
struct cpumask * pd_mask ;
2020-12-08 19:41:45 +03:00
unsigned long freq ;
2021-03-12 16:04:11 +03:00
int i ;
2020-12-08 19:41:45 +03:00
pd = em_cpu_get ( dtpm_cpu - > cpu ) ;
2021-03-12 16:04:07 +03:00
2021-03-12 16:04:11 +03:00
pd_mask = em_span_cpus ( pd ) ;
freq = cpufreq_quick_get ( dtpm_cpu - > cpu ) ;
2020-12-08 19:41:45 +03:00
for ( i = 0 ; i < pd - > nr_perf_states ; i + + ) {
if ( pd - > table [ i ] . frequency < freq )
continue ;
2021-03-12 16:04:11 +03:00
return scale_pd_power_uw ( pd_mask , pd - > table [ i ] . power *
MICROWATT_PER_MILLIWATT ) ;
2020-12-08 19:41:45 +03:00
}
return 0 ;
}
2021-03-12 16:04:07 +03:00
static int update_pd_power_uw ( struct dtpm * dtpm )
{
2021-03-12 16:04:10 +03:00
struct dtpm_cpu * dtpm_cpu = to_dtpm_cpu ( dtpm ) ;
2021-03-12 16:04:07 +03:00
struct em_perf_domain * em = em_cpu_get ( dtpm_cpu - > cpu ) ;
struct cpumask cpus ;
int nr_cpus ;
cpumask_and ( & cpus , cpu_online_mask , to_cpumask ( em - > cpus ) ) ;
nr_cpus = cpumask_weight ( & cpus ) ;
dtpm - > power_min = em - > table [ 0 ] . power ;
dtpm - > power_min * = MICROWATT_PER_MILLIWATT ;
dtpm - > power_min * = nr_cpus ;
dtpm - > power_max = em - > table [ em - > nr_perf_states - 1 ] . power ;
dtpm - > power_max * = MICROWATT_PER_MILLIWATT ;
dtpm - > power_max * = nr_cpus ;
return 0 ;
}
2020-12-08 19:41:45 +03:00
static void pd_release ( struct dtpm * dtpm )
{
2021-03-12 16:04:10 +03:00
struct dtpm_cpu * dtpm_cpu = to_dtpm_cpu ( dtpm ) ;
2022-01-31 00:02:04 +03:00
struct cpufreq_policy * policy ;
2020-12-08 19:41:45 +03:00
if ( freq_qos_request_active ( & dtpm_cpu - > qos_req ) )
freq_qos_remove_request ( & dtpm_cpu - > qos_req ) ;
2022-01-31 00:02:04 +03:00
policy = cpufreq_cpu_get ( dtpm_cpu - > cpu ) ;
if ( policy ) {
for_each_cpu ( dtpm_cpu - > cpu , policy - > related_cpus )
per_cpu ( dtpm_per_cpu , dtpm_cpu - > cpu ) = NULL ;
}
2020-12-08 19:41:45 +03:00
kfree ( dtpm_cpu ) ;
}
/* Node operations hooked into the DTPM tree for CPU devices. */
static struct dtpm_ops dtpm_ops = {
	.set_power_uw	 = set_pd_power_limit,
	.get_power_uw	 = get_pd_power_uw,
	.update_power_uw = update_pd_power_uw,
	.release	 = pd_release,
};
static int cpuhp_dtpm_cpu_offline ( unsigned int cpu )
{
2021-03-12 16:04:10 +03:00
struct dtpm_cpu * dtpm_cpu ;
2020-12-08 19:41:45 +03:00
2021-03-12 16:04:10 +03:00
dtpm_cpu = per_cpu ( dtpm_per_cpu , cpu ) ;
2021-11-08 09:23:44 +03:00
if ( dtpm_cpu )
dtpm_update_power ( & dtpm_cpu - > dtpm ) ;
2020-12-08 19:41:45 +03:00
2021-11-08 09:23:44 +03:00
return 0 ;
2020-12-08 19:41:45 +03:00
}
static int cpuhp_dtpm_cpu_online ( unsigned int cpu )
2022-01-28 19:35:35 +03:00
{
struct dtpm_cpu * dtpm_cpu ;
dtpm_cpu = per_cpu ( dtpm_per_cpu , cpu ) ;
if ( dtpm_cpu )
return dtpm_update_power ( & dtpm_cpu - > dtpm ) ;
return 0 ;
}
static int __dtpm_cpu_setup ( int cpu , struct dtpm * parent )
2020-12-08 19:41:45 +03:00
{
struct dtpm_cpu * dtpm_cpu ;
struct cpufreq_policy * policy ;
struct em_perf_domain * pd ;
char name [ CPUFREQ_NAME_LEN ] ;
int ret = - ENOMEM ;
2022-01-28 19:35:35 +03:00
dtpm_cpu = per_cpu ( dtpm_per_cpu , cpu ) ;
if ( dtpm_cpu )
return 0 ;
2020-12-08 19:41:45 +03:00
policy = cpufreq_cpu_get ( cpu ) ;
if ( ! policy )
return 0 ;
pd = em_cpu_get ( cpu ) ;
2022-03-21 12:57:29 +03:00
if ( ! pd | | em_is_artificial ( pd ) )
2020-12-08 19:41:45 +03:00
return - EINVAL ;
2021-01-04 15:10:53 +03:00
dtpm_cpu = kzalloc ( sizeof ( * dtpm_cpu ) , GFP_KERNEL ) ;
2020-12-08 19:41:45 +03:00
if ( ! dtpm_cpu )
2021-03-12 16:04:10 +03:00
return - ENOMEM ;
2020-12-08 19:41:45 +03:00
2021-03-12 16:04:10 +03:00
dtpm_init ( & dtpm_cpu - > dtpm , & dtpm_ops ) ;
2020-12-08 19:41:45 +03:00
dtpm_cpu - > cpu = cpu ;
for_each_cpu ( cpu , policy - > related_cpus )
2021-03-12 16:04:10 +03:00
per_cpu ( dtpm_per_cpu , cpu ) = dtpm_cpu ;
2020-12-08 19:41:45 +03:00
2021-03-12 16:04:07 +03:00
snprintf ( name , sizeof ( name ) , " cpu%d-cpufreq " , dtpm_cpu - > cpu ) ;
2020-12-08 19:41:45 +03:00
2022-01-28 19:35:35 +03:00
ret = dtpm_register ( name , & dtpm_cpu - > dtpm , parent ) ;
2020-12-08 19:41:45 +03:00
if ( ret )
goto out_kfree_dtpm_cpu ;
ret = freq_qos_add_request ( & policy - > constraints ,
& dtpm_cpu - > qos_req , FREQ_QOS_MAX ,
pd - > table [ pd - > nr_perf_states - 1 ] . frequency ) ;
if ( ret )
2021-03-12 16:04:07 +03:00
goto out_dtpm_unregister ;
2020-12-08 19:41:45 +03:00
return 0 ;
out_dtpm_unregister :
2021-03-12 16:04:10 +03:00
dtpm_unregister ( & dtpm_cpu - > dtpm ) ;
2020-12-08 19:41:45 +03:00
dtpm_cpu = NULL ;
out_kfree_dtpm_cpu :
for_each_cpu ( cpu , policy - > related_cpus )
per_cpu ( dtpm_per_cpu , cpu ) = NULL ;
kfree ( dtpm_cpu ) ;
return ret ;
}
/*
 * DTPM subsystem setup callback: create the node for the CPU described
 * by the device tree node @np. Non-CPU nodes are silently ignored.
 */
static int dtpm_cpu_setup(struct dtpm *dtpm, struct device_node *np)
{
	int cpu = of_cpu_node_to_id(np);

	return cpu < 0 ? 0 : __dtpm_cpu_setup(cpu, dtpm);
}
static int dtpm_cpu_init ( void )
2020-12-08 19:41:45 +03:00
{
2021-03-12 16:04:07 +03:00
int ret ;
/*
* The callbacks at CPU hotplug time are calling
* dtpm_update_power ( ) which in turns calls update_pd_power ( ) .
*
* The function update_pd_power ( ) uses the online mask to
* figure out the power consumption limits .
*
* At CPUHP_AP_ONLINE_DYN , the CPU is present in the CPU
* online mask when the cpuhp_dtpm_cpu_online function is
* called , but the CPU is still in the online mask for the
* tear down callback . So the power can not be updated when
* the CPU is unplugged .
*
* At CPUHP_AP_DTPM_CPU_DEAD , the situation is the opposite as
* above . The CPU online mask is not up to date when the CPU
* is plugged in .
*
* For this reason , we need to call the online and offline
* callbacks at different moments when the CPU online mask is
* consistent with the power numbers we want to update .
*/
ret = cpuhp_setup_state ( CPUHP_AP_DTPM_CPU_DEAD , " dtpm_cpu:offline " ,
NULL , cpuhp_dtpm_cpu_offline ) ;
if ( ret < 0 )
return ret ;
ret = cpuhp_setup_state ( CPUHP_AP_ONLINE_DYN , " dtpm_cpu:online " ,
cpuhp_dtpm_cpu_online , NULL ) ;
if ( ret < 0 )
return ret ;
2020-12-08 19:41:45 +03:00
2021-03-12 16:04:07 +03:00
return 0 ;
2020-12-08 19:41:45 +03:00
}
/*
 * DTPM subsystem exit callback: unregister the hotplug callbacks, in
 * reverse order of their registration in dtpm_cpu_init().
 */
static void dtpm_cpu_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
	cpuhp_remove_state_nocalls(CPUHP_AP_DTPM_CPU_DEAD);
}
/* Hooks exported to the DTPM core for the CPU subsystem. */
struct dtpm_subsys_ops dtpm_cpu_ops = {
	.name	= KBUILD_MODNAME,
	.init	= dtpm_cpu_init,
	.exit	= dtpm_cpu_exit,
	.setup	= dtpm_cpu_setup,
};