/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include "cpufreq_governor.h"
2005-06-01 06:03:47 +04:00
2013-02-08 21:24:24 +04:00
/* Conservative governor macros */
2005-06-01 06:03:47 +04:00
# define DEF_FREQUENCY_UP_THRESHOLD (80)
# define DEF_FREQUENCY_DOWN_THRESHOLD (20)
2006-03-22 12:54:10 +03:00
# define DEF_SAMPLING_DOWN_FACTOR (1)
# define MAX_SAMPLING_DOWN_FACTOR (10)
2005-06-01 06:03:47 +04:00
2012-10-26 02:47:42 +04:00
static DEFINE_PER_CPU ( struct cs_cpu_dbs_info_s , cs_cpu_dbs_info ) ;
2005-06-01 06:03:47 +04:00
2012-10-26 02:47:42 +04:00
/*
* Every sampling_rate , we check , if current idle time is less than 20 %
* ( default ) , then we try to increase frequency Every sampling_rate *
* sampling_down_factor , we check , if current idle time is more than 80 % , then
* we try to decrease frequency
*
* Any frequency increase takes it to the maximum frequency . Frequency reduction
* happens at minimum steps of 5 % ( default ) of maximum frequency
*/
static void cs_check_cpu ( int cpu , unsigned int load )
2007-10-22 11:50:13 +04:00
{
2012-10-26 02:47:42 +04:00
struct cs_cpu_dbs_info_s * dbs_info = & per_cpu ( cs_cpu_dbs_info , cpu ) ;
struct cpufreq_policy * policy = dbs_info - > cdbs . cur_policy ;
2013-03-27 19:58:58 +04:00
struct dbs_data * dbs_data = policy - > governor_data ;
struct cs_dbs_tuners * cs_tuners = dbs_data - > tuners ;
2012-10-26 02:47:42 +04:00
unsigned int freq_target ;
/*
* break out if we ' cannot ' reduce the speed as the user might
* want freq_step to be zero
*/
2013-03-27 19:58:58 +04:00
if ( cs_tuners - > freq_step = = 0 )
2012-10-26 02:47:42 +04:00
return ;
/* Check for frequency increase */
2013-03-27 19:58:58 +04:00
if ( load > cs_tuners - > up_threshold ) {
2012-10-26 02:47:42 +04:00
dbs_info - > down_skip = 0 ;
/* if we are already at full speed then break out early */
if ( dbs_info - > requested_freq = = policy - > max )
return ;
2013-03-27 19:58:58 +04:00
freq_target = ( cs_tuners - > freq_step * policy - > max ) / 100 ;
2012-10-26 02:47:42 +04:00
/* max freq cannot be less than 100. But who knows.... */
if ( unlikely ( freq_target = = 0 ) )
freq_target = 5 ;
dbs_info - > requested_freq + = freq_target ;
if ( dbs_info - > requested_freq > policy - > max )
dbs_info - > requested_freq = policy - > max ;
2007-10-22 11:50:13 +04:00
2012-10-26 02:47:42 +04:00
__cpufreq_driver_target ( policy , dbs_info - > requested_freq ,
CPUFREQ_RELATION_H ) ;
return ;
}
/*
* The optimal frequency is the frequency that is the lowest that can
* support the current CPU usage without triggering the up policy . To be
* safe , we focus 10 points under the threshold .
*/
2013-03-27 19:58:58 +04:00
if ( load < ( cs_tuners - > down_threshold - 10 ) ) {
2012-10-26 02:47:42 +04:00
/*
* if we cannot reduce the frequency anymore , break out early
*/
if ( policy - > cur = = policy - > min )
return ;
2013-02-28 09:38:01 +04:00
freq_target = ( cs_tuners - > freq_step * policy - > max ) / 100 ;
dbs_info - > requested_freq - = freq_target ;
if ( dbs_info - > requested_freq < policy - > min )
dbs_info - > requested_freq = policy - > min ;
2012-10-26 02:47:42 +04:00
__cpufreq_driver_target ( policy , dbs_info - > requested_freq ,
2013-02-28 09:38:02 +04:00
CPUFREQ_RELATION_L ) ;
2012-10-26 02:47:42 +04:00
return ;
}
}
2013-01-31 21:28:02 +04:00
static void cs_dbs_timer ( struct work_struct * work )
2012-10-26 02:47:42 +04:00
{
2013-01-31 21:28:02 +04:00
struct cs_cpu_dbs_info_s * dbs_info = container_of ( work ,
struct cs_cpu_dbs_info_s , cdbs . work . work ) ;
2013-01-31 14:39:19 +04:00
unsigned int cpu = dbs_info - > cdbs . cur_policy - > cpu ;
2013-01-31 21:28:02 +04:00
struct cs_cpu_dbs_info_s * core_dbs_info = & per_cpu ( cs_cpu_dbs_info ,
cpu ) ;
2013-03-27 19:58:58 +04:00
struct dbs_data * dbs_data = dbs_info - > cdbs . cur_policy - > governor_data ;
struct cs_dbs_tuners * cs_tuners = dbs_data - > tuners ;
int delay = delay_for_sampling_rate ( cs_tuners - > sampling_rate ) ;
2013-02-27 10:54:03 +04:00
bool modify_all = true ;
2012-10-26 02:47:42 +04:00
2013-01-31 21:28:02 +04:00
mutex_lock ( & core_dbs_info - > cdbs . timer_mutex ) ;
2013-02-27 10:54:03 +04:00
if ( ! need_load_eval ( & core_dbs_info - > cdbs , cs_tuners - > sampling_rate ) )
modify_all = false ;
else
2013-03-27 19:58:58 +04:00
dbs_check_cpu ( dbs_data , cpu ) ;
2012-12-27 18:55:41 +04:00
2013-02-27 10:54:03 +04:00
gov_queue_work ( dbs_data , dbs_info - > cdbs . cur_policy , delay , modify_all ) ;
2013-01-31 21:28:02 +04:00
mutex_unlock ( & core_dbs_info - > cdbs . timer_mutex ) ;
2012-12-27 18:55:41 +04:00
}
2012-10-26 02:47:42 +04:00
static int dbs_cpufreq_notifier ( struct notifier_block * nb , unsigned long val ,
void * data )
{
struct cpufreq_freqs * freq = data ;
struct cs_cpu_dbs_info_s * dbs_info =
& per_cpu ( cs_cpu_dbs_info , freq - > cpu ) ;
2009-02-13 22:01:51 +03:00
struct cpufreq_policy * policy ;
2012-10-26 02:47:42 +04:00
if ( ! dbs_info - > enable )
2007-10-22 11:50:13 +04:00
return 0 ;
2012-10-26 02:47:42 +04:00
policy = dbs_info - > cdbs . cur_policy ;
2009-02-13 22:01:51 +03:00
/*
2012-10-26 02:47:42 +04:00
* we only care if our internally tracked freq moves outside the ' valid '
2013-02-08 21:24:24 +04:00
* ranges of frequency available to us otherwise we do not change it
2009-02-13 22:01:51 +03:00
*/
2012-10-26 02:47:42 +04:00
if ( dbs_info - > requested_freq > policy - > max
| | dbs_info - > requested_freq < policy - > min )
dbs_info - > requested_freq = freq - > new ;
2007-10-22 11:50:13 +04:00
return 0 ;
}
2005-06-01 06:03:47 +04:00
/************************** sysfs interface ************************/
2013-03-27 19:58:58 +04:00
static struct common_dbs_data cs_dbs_cdata ;
2005-06-01 06:03:47 +04:00
2013-03-27 19:58:58 +04:00
static ssize_t store_sampling_down_factor ( struct dbs_data * dbs_data ,
const char * buf , size_t count )
2005-06-01 06:03:47 +04:00
{
2013-03-27 19:58:58 +04:00
struct cs_dbs_tuners * cs_tuners = dbs_data - > tuners ;
2005-06-01 06:03:47 +04:00
unsigned int input ;
int ret ;
2009-01-18 09:39:51 +03:00
ret = sscanf ( buf , " %u " , & input ) ;
2009-02-13 22:02:34 +03:00
2006-03-22 12:54:10 +03:00
if ( ret ! = 1 | | input > MAX_SAMPLING_DOWN_FACTOR | | input < 1 )
2005-06-01 06:03:47 +04:00
return - EINVAL ;
2013-03-27 19:58:58 +04:00
cs_tuners - > sampling_down_factor = input ;
2005-06-01 06:03:47 +04:00
return count ;
}
2013-03-27 19:58:58 +04:00
/*
 * sysfs store: sampling_rate (usecs between load evaluations), clamped to
 * the governor's minimum sampling rate.
 */
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
	return count;
}
2013-03-27 19:58:58 +04:00
/*
 * sysfs store: up_threshold (% load above which we raise the frequency).
 * Must be <= 100 and strictly above down_threshold.
 */
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	cs_tuners->up_threshold = input;
	return count;
}
2013-03-27 19:58:58 +04:00
/*
 * sysfs store: down_threshold (% load below which we lower the frequency).
 * cs_check_cpu() compares against (down_threshold - 10), hence the floor of 11.
 */
static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= cs_tuners->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}
2013-03-27 19:58:58 +04:00
/*
 * sysfs store: ignore_nice (boolean) — whether niced-task time counts as
 * idle. Resets each online CPU's idle baselines so the next evaluation
 * starts from consistent state.
 */
static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == cs_tuners->ignore_nice) /* nothing to do */
		return count;

	cs_tuners->ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cs_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
					&dbs_info->cdbs.prev_cpu_wall, 0);
		if (cs_tuners->ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}
2013-03-27 19:58:58 +04:00
/*
 * sysfs store: freq_step (% of policy->max to move per adjustment),
 * capped at 100. Zero is allowed — it effectively pins the frequency.
 */
static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}
2013-03-27 19:58:58 +04:00
show_store_one ( cs , sampling_rate ) ;
show_store_one ( cs , sampling_down_factor ) ;
show_store_one ( cs , up_threshold ) ;
show_store_one ( cs , down_threshold ) ;
show_store_one ( cs , ignore_nice ) ;
show_store_one ( cs , freq_step ) ;
declare_show_sampling_rate_min ( cs ) ;
gov_sys_pol_attr_rw ( sampling_rate ) ;
gov_sys_pol_attr_rw ( sampling_down_factor ) ;
gov_sys_pol_attr_rw ( up_threshold ) ;
gov_sys_pol_attr_rw ( down_threshold ) ;
gov_sys_pol_attr_rw ( ignore_nice ) ;
gov_sys_pol_attr_rw ( freq_step ) ;
gov_sys_pol_attr_ro ( sampling_rate_min ) ;
static struct attribute * dbs_attributes_gov_sys [ ] = {
& sampling_rate_min_gov_sys . attr ,
& sampling_rate_gov_sys . attr ,
& sampling_down_factor_gov_sys . attr ,
& up_threshold_gov_sys . attr ,
& down_threshold_gov_sys . attr ,
& ignore_nice_gov_sys . attr ,
& freq_step_gov_sys . attr ,
2005-06-01 06:03:47 +04:00
NULL
} ;
2013-03-27 19:58:58 +04:00
static struct attribute_group cs_attr_group_gov_sys = {
. attrs = dbs_attributes_gov_sys ,
. name = " conservative " ,
} ;
static struct attribute * dbs_attributes_gov_pol [ ] = {
& sampling_rate_min_gov_pol . attr ,
& sampling_rate_gov_pol . attr ,
& sampling_down_factor_gov_pol . attr ,
& up_threshold_gov_pol . attr ,
& down_threshold_gov_pol . attr ,
& ignore_nice_gov_pol . attr ,
& freq_step_gov_pol . attr ,
NULL
} ;
static struct attribute_group cs_attr_group_gov_pol = {
. attrs = dbs_attributes_gov_pol ,
2005-06-01 06:03:47 +04:00
. name = " conservative " ,
} ;
/************************** sysfs end ************************/
2013-03-27 19:58:58 +04:00
static int cs_init ( struct dbs_data * dbs_data )
{
struct cs_dbs_tuners * tuners ;
tuners = kzalloc ( sizeof ( struct cs_dbs_tuners ) , GFP_KERNEL ) ;
if ( ! tuners ) {
pr_err ( " %s: kzalloc failed \n " , __func__ ) ;
return - ENOMEM ;
}
tuners - > up_threshold = DEF_FREQUENCY_UP_THRESHOLD ;
tuners - > down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD ;
tuners - > sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR ;
tuners - > ignore_nice = 0 ;
tuners - > freq_step = 5 ;
dbs_data - > tuners = tuners ;
dbs_data - > min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
jiffies_to_usecs ( 10 ) ;
mutex_init ( & dbs_data - > mutex ) ;
return 0 ;
}
static void cs_exit ( struct dbs_data * dbs_data )
{
kfree ( dbs_data - > tuners ) ;
}
2012-10-26 02:47:42 +04:00
define_get_cpu_dbs_routines ( cs_cpu_dbs_info ) ;
2009-02-13 22:02:34 +03:00
2012-10-26 02:47:42 +04:00
static struct notifier_block cs_cpufreq_notifier_block = {
. notifier_call = dbs_cpufreq_notifier ,
} ;
2009-02-13 22:02:34 +03:00
2012-10-26 02:47:42 +04:00
static struct cs_ops cs_ops = {
. notifier_block = & cs_cpufreq_notifier_block ,
} ;
2005-06-01 06:03:47 +04:00
2013-03-27 19:58:58 +04:00
static struct common_dbs_data cs_dbs_cdata = {
2012-10-26 02:47:42 +04:00
. governor = GOV_CONSERVATIVE ,
2013-03-27 19:58:58 +04:00
. attr_group_gov_sys = & cs_attr_group_gov_sys ,
. attr_group_gov_pol = & cs_attr_group_gov_pol ,
2012-10-26 02:47:42 +04:00
. get_cpu_cdbs = get_cpu_cdbs ,
. get_cpu_dbs_info_s = get_cpu_dbs_info_s ,
. gov_dbs_timer = cs_dbs_timer ,
. gov_check_cpu = cs_check_cpu ,
. gov_ops = & cs_ops ,
2013-03-27 19:58:58 +04:00
. init = cs_init ,
. exit = cs_exit ,
2012-10-26 02:47:42 +04:00
} ;
2005-06-01 06:03:47 +04:00
2012-10-26 02:47:42 +04:00
static int cs_cpufreq_governor_dbs ( struct cpufreq_policy * policy ,
2005-06-01 06:03:47 +04:00
unsigned int event )
{
2013-03-27 19:58:58 +04:00
return cpufreq_governor_dbs ( policy , & cs_dbs_cdata , event ) ;
2005-06-01 06:03:47 +04:00
}
2008-09-20 18:50:08 +04:00
# ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
# endif
2007-10-03 00:28:12 +04:00
struct cpufreq_governor cpufreq_gov_conservative = {
. name = " conservative " ,
2012-10-26 02:47:42 +04:00
. governor = cs_cpufreq_governor_dbs ,
2007-10-03 00:28:12 +04:00
. max_transition_latency = TRANSITION_LATENCY_LIMIT ,
. owner = THIS_MODULE ,
2005-06-01 06:03:47 +04:00
} ;
/* Module init: register the conservative governor with the cpufreq core. */
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}
/* Module exit: unregister the governor. */
static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}
2009-02-13 22:01:01 +03:00
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

/* When built in as the default governor, register early (fs_initcall) so it
 * is available before the cpufreq core brings up policies. */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);