// SPDX-License-Identifier: GPL-2.0
/*
* Versatile Express SPC CPUFreq Interface driver
*
 * Copyright (C) 2013 - 2019 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>

/* Currently we support only two clusters */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2

#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled()	bL_switching_enabled
#define set_switching_enabled(x)	(bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled()	false
#define set_switching_enabled(x)	do { } while (0)
#define bL_switch_request(...)		do { } while (0)
#define bL_switcher_put_enabled()	do { } while (0)
#define bL_switcher_get_enabled()	do { } while (0)
#endif

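/*
 * In switcher (IKS) mode the A7 cluster is exposed through virtual
 * frequencies at half of its actual clock rate, so that A15 and A7
 * entries can coexist in a single merged frequency table.
 */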
#define ACTUAL_FREQ(cluster, freq)  ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)    ((cluster == A7_CLUSTER) ? freq >> 1 : freq)

static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];

static unsigned int clk_big_min;	/* (Big) clock frequencies */
static unsigned int clk_little_max;	/* Maximum clock frequency (Little) */

static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);

static struct mutex cluster_lock[MAX_CLUSTERS];

static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}

static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}

static unsigned int find_cluster_maxfreq(int cluster)
{
	int j;
	u32 max_freq = 0, cpu_freq;

	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);

		if (cluster == per_cpu(physical_cluster, j) &&
		    max_freq < cpu_freq)
			max_freq = cpu_freq;
	}

	return max_freq;
}

static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu);
	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		rate = VIRT_FREQ(cur_cluster, rate);

	return rate;
}

static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
{
	if (is_bL_switching_enabled())
		return per_cpu(cpu_last_req_freq, cpu);
	else
		return clk_get_cpu_rate(cpu);
}

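/*
 * ve_spc_cpufreq_set_rate() programs the clock of @new_cluster. In switcher
 * mode it records the request in cpu_last_req_freq, sets the cluster to the
 * maximum frequency requested by any of its CPUs (converted to the actual
 * rate) and, when the target cluster differs from the current one, asks the
 * bL switcher to migrate the CPU and then recalculates the old cluster rate.
 */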
static unsigned int
ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (!ret) {
		/*
		 * FIXME: clk_set_rate hasn't returned an error here however it
		 * may be that clk_change_rate failed due to hardware or
		 * firmware issues and wasn't able to report that due to the
		 * current design of the clk core layer. To work around this
		 * problem we will read back the clock rate and check it is
		 * correct. This needs to be removed once clk core is fixed.
		 */
		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
			ret = -EIO;
	}

	if (WARN_ON(ret)) {
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate &&
		    clk_set_rate(clk[old_cluster], new_rate * 1000)) {
			pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
			       __func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}

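/*
 * In switcher mode, requesting a frequency below the A15 table's minimum
 * moves the CPU to the A7 cluster, and requesting one above the A7 table's
 * virtual maximum moves it to the A15 cluster; otherwise the CPU stays on
 * its current physical cluster.
 */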
/* Set clock frequency */
static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
				     unsigned int index)
{
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	unsigned int freqs_new;
	int ret;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs_new = freq_table[cur_cluster][index].frequency;

	if (is_bL_switching_enabled()) {
		if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
			new_cluster = A7_CLUSTER;
		else if (actual_cluster == A7_CLUSTER &&
			 freqs_new > clk_little_max)
			new_cluster = A15_CLUSTER;
	}

	ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
				      freqs_new);

	if (!ret) {
		arch_set_freq_scale(policy->related_cpus, freqs_new,
				    policy->cpuinfo.max_freq);
	}

	return ret;
}

static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
	int count;

	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
		;

	return count;
}

/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	u32 min_freq = ~0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;

	return min_freq;
}

/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	u32 max_freq = 0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency > max_freq)
			max_freq = pos->frequency;

	return max_freq;
}

static bool search_frequency(struct cpufreq_frequency_table *table, int size,
			     unsigned int freq)
{
	int count;

	for (count = 0; count < size; count++) {
		if (table[count].frequency == freq)
			return true;
	}

	return false;
}

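/*
 * Build the merged table used for the virtual cluster: the A7 entries
 * (already virtualized) are added first, followed by the A15 entries with
 * duplicates skipped, and the table is terminated with CPUFREQ_TABLE_END.
 */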
static int merge_cluster_tables(void)
{
	int i, j, k = 0, count = 1;
	struct cpufreq_frequency_table *table;

	for (i = 0; i < MAX_CLUSTERS; i++)
		count += get_table_count(freq_table[i]);

	table = kcalloc(count, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	freq_table[MAX_CLUSTERS] = table;

	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
		     j++) {
			if (i == A15_CLUSTER &&
			    search_frequency(table, count, freq_table[i][j].frequency))
				continue; /* skip duplicates */
			table[k++].frequency =
				VIRT_FREQ(i, freq_table[i][j].frequency);
		}
	}

	table[k].driver_data = k;
	table[k].frequency = CPUFREQ_TABLE_END;

	return 0;
}

static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
					    const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);

	if (!freq_table[cluster])
		return;

	clk_put(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
}

static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i;

	if (atomic_dec_return(&cluster_usage[cluster]))
		return;

	if (cluster < MAX_CLUSTERS)
		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev)
			return;

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	/* free virtual table */
	kfree(freq_table[cluster]);
}

static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
	int ret;

	if (freq_table[cluster])
		return 0;

	/*
	 * platform specific SPC code must initialise the opp table
	 * so just check if the OPP count is non-zero
	 */
	ret = dev_pm_opp_get_opp_count(cpu_dev) <= 0;
	if (ret)
		goto out;

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (ret)
		goto out;

	clk[cluster] = clk_get(cpu_dev, NULL);
	if (!IS_ERR(clk[cluster]))
		return 0;

	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
		__func__, cpu_dev->id, cluster);
	ret = PTR_ERR(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

out:
	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
		cluster);
	return ret;
}

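/*
 * get_cluster_clk_and_freq_table() refcounts the per-cluster setup via
 * cluster_usage. For a physical cluster it initializes only that cluster;
 * for the virtual cluster (switcher mode) it initializes both physical
 * clusters, merges their tables and derives clk_big_min/clk_little_max.
 */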
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
					  const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i, ret;

	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
		return 0;

	if (cluster < MAX_CLUSTERS) {
		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
		if (ret)
			atomic_dec(&cluster_usage[cluster]);
		return ret;
	}

	/*
	 * Get data for all clusters and fill virtual cluster with a merge of
	 * both
	 */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev)
			return -ENODEV;

		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
		if (ret)
			goto put_clusters;
	}

	ret = merge_cluster_tables();
	if (ret)
		goto put_clusters;

	/* Assuming 2 cluster, set clk_big_min and clk_little_max */
	clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
	clk_little_max = VIRT_FREQ(A7_CLUSTER,
				   get_table_max(freq_table[A7_CLUSTER]));

	return 0;

put_clusters:
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev)
			return -ENODEV;

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	atomic_dec(&cluster_usage[cluster]);

	return ret;
}

/* Per-CPU initialization */
static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
	if (ret)
		return ret;

	policy->freq_table = freq_table[cur_cluster];
	policy->cpuinfo.transition_latency = 1000000; /* 1 ms */

	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) =
						clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}

static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;
	int cur_cluster = cpu_to_cluster(policy->cpu);

	if (cur_cluster < MAX_CLUSTERS) {
		cpufreq_cooling_unregister(cdev[cur_cluster]);
		cdev[cur_cluster] = NULL;
	}

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
	return 0;
}

static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
{
	int cur_cluster = cpu_to_cluster(policy->cpu);

	/* Do not register a cpu_cooling device if we are in IKS mode */
	if (cur_cluster >= MAX_CLUSTERS)
		return;

	cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
}

static struct cpufreq_driver ve_spc_cpufreq_driver = {
	.name			= "vexpress-spc",
	.flags			= CPUFREQ_STICKY |
					CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
					CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify			= cpufreq_generic_frequency_table_verify,
	.target_index		= ve_spc_cpufreq_set_target,
	.get			= ve_spc_cpufreq_get_rate,
	.init			= ve_spc_cpufreq_init,
	.exit			= ve_spc_cpufreq_exit,
	.ready			= ve_spc_cpufreq_ready,
	.attr			= cpufreq_generic_attr,
};

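/*
 * big.LITTLE switcher notifier: the cpufreq driver is unregistered before
 * the switcher is enabled or disabled and re-registered afterwards, so that
 * policies, frequency tables and the cluster mapping are rebuilt for the
 * new mode.
 */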
#ifdef CONFIG_BL_SWITCHER
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
					unsigned long action, void *_arg)
{
	pr_debug("%s: action: %ld\n", __func__, action);

	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_ENABLE:
		set_switching_enabled(true);
		cpufreq_register_driver(&ve_spc_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_DISABLE:
		set_switching_enabled(false);
		cpufreq_register_driver(&ve_spc_cpufreq_driver);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block bL_switcher_notifier = {
	.notifier_call = bL_cpufreq_switcher_notifier,
};

static int __bLs_register_notifier(void)
{
	return bL_switcher_register_notifier(&bL_switcher_notifier);
}

static int __bLs_unregister_notifier(void)
{
	return bL_switcher_unregister_notifier(&bL_switcher_notifier);
}
#else
static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif

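/*
 * Probe/remove sample the switcher state with bL_switcher_get_enabled() and
 * release it with bL_switcher_put_enabled() once (un)registration is done,
 * so is_bL_switching_enabled() stays consistent with the registered driver.
 */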
static int ve_spc_cpufreq_probe(struct platform_device *pdev)
{
	int ret, i;

	set_switching_enabled(bL_switcher_get_enabled());

	for (i = 0; i < MAX_CLUSTERS; i++)
		mutex_init(&cluster_lock[i]);

	ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
	if (ret) {
		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
			__func__, ve_spc_cpufreq_driver.name, ret);
	} else {
		ret = __bLs_register_notifier();
		if (ret)
			cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
		else
			pr_info("%s: Registered platform driver: %s\n",
				__func__, ve_spc_cpufreq_driver.name);
	}

	bL_switcher_put_enabled();
	return ret;
}

static int ve_spc_cpufreq_remove(struct platform_device *pdev)
{
	bL_switcher_get_enabled();
	__bLs_unregister_notifier();
	cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
	bL_switcher_put_enabled();
	pr_info("%s: Un-registered platform driver: %s\n", __func__,
		ve_spc_cpufreq_driver.name);
	return 0;
}

static struct platform_driver ve_spc_cpufreq_platdrv = {
	.driver = {
		.name	= "vexpress-spc-cpufreq",
	},
	.probe		= ve_spc_cpufreq_probe,
	.remove		= ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);

MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");