/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                     Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include "cpufreq_governor.h"

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)

static struct dbs_data cs_dbs_data;
static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

static struct cs_dbs_tuners cs_tuners = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};

/*
 * Every sampling_rate, we check whether the current idle time is less than
 * 20% (default); if it is, we try to increase the frequency. Every
 * sampling_rate * sampling_down_factor, we check whether the current idle
 * time is more than 80%; if it is, we try to decrease the frequency.
 *
 * Any frequency increase takes it to the maximum frequency. Frequency
 * reduction happens in minimum steps of 5% (default) of the maximum
 * frequency.
 */
static void cs_check_cpu(int cpu, unsigned int load)
{
	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	unsigned int freq_target;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners.freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > cs_tuners.up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			return;

		freq_target = (cs_tuners.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		dbs_info->requested_freq += freq_target;
		if (dbs_info->requested_freq > policy->max)
			dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}

	/*
	 * The optimal frequency is the lowest frequency that can support the
	 * current CPU usage without triggering the up policy. To be safe, we
	 * stay 10 points under the threshold.
	 */
	if (load < (cs_tuners.down_threshold - 10)) {
		freq_target = (cs_tuners.freq_step * policy->max) / 100;

		dbs_info->requested_freq -= freq_target;
		if (dbs_info->requested_freq < policy->min)
			dbs_info->requested_freq = policy->min;

		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}
}
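
/*
 * Timer helpers: cs_timer_update() optionally runs one sampling pass and then
 * re-arms the delayed work on the current CPU; cs_timer_coordinated() handles
 * policies shared between several CPUs, using the policy CPU's time stamp to
 * avoid re-sampling too soon after the last sample.
 */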
static void cs_timer_update(struct cs_cpu_dbs_info_s *dbs_info, bool sample,
			    struct delayed_work *dw)
{
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);

	if (sample)
		dbs_check_cpu(&cs_dbs_data, cpu);

	schedule_delayed_work_on(smp_processor_id(), dw, delay);
}

static void cs_timer_coordinated(struct cs_cpu_dbs_info_s *dbs_info_local,
				 struct delayed_work *dw)
{
	struct cs_cpu_dbs_info_s *dbs_info;
	ktime_t time_now;
	s64 delta_us;
	bool sample = true;

	/* use leader CPU's dbs_info */
	dbs_info = &per_cpu(cs_cpu_dbs_info,
			    dbs_info_local->cdbs.cur_policy->cpu);
	mutex_lock(&dbs_info->cdbs.timer_mutex);

	time_now = ktime_get();
	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);

	/* Do nothing if we recently have sampled */
	if (delta_us < (s64)(cs_tuners.sampling_rate / 2))
		sample = false;
	else
		dbs_info->cdbs.time_stamp = time_now;

	cs_timer_update(dbs_info, sample, dw);
	mutex_unlock(&dbs_info->cdbs.timer_mutex);
}

static void cs_dbs_timer(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
			struct cs_cpu_dbs_info_s, cdbs.work.work);

	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
		cs_timer_coordinated(dbs_info, dw);
	} else {
		mutex_lock(&dbs_info->cdbs.timer_mutex);
		cs_timer_update(dbs_info, true, dw);
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}
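
/*
 * cpufreq transition notifier: if the frequency changes behind our back,
 * keep the internally tracked requested_freq within the policy's current
 * min/max limits so later adjustments start from a valid frequency.
 */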
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cs_cpu_dbs_info_s *dbs_info =
					&per_cpu(cs_cpu_dbs_info, freq->cpu);
	struct cpufreq_policy *policy;

	if (!dbs_info->enable)
		return 0;

	policy = dbs_info->cdbs.cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside the
	 * 'valid' range of frequencies available to us; otherwise we do not
	 * change it
	 */
	if (dbs_info->requested_freq > policy->max
			|| dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
}

static ssize_t store_sampling_down_factor(struct kobject *a,
					  struct attribute *b,
					  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	cs_tuners.sampling_down_factor = input;
	return count;
}

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate);
	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold)
		return -EINVAL;

	cs_tuners.up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= cs_tuners.up_threshold)
		return -EINVAL;

	cs_tuners.down_threshold = input;
	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == cs_tuners.ignore_nice) /* nothing to do */
		return count;

	cs_tuners.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cs_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
					&dbs_info->cdbs.prev_cpu_wall);
		if (cs_tuners.ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
			       const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners.freq_step = input;
	return count;
}

show_one(cs, sampling_rate, sampling_rate);
show_one(cs, sampling_down_factor, sampling_down_factor);
show_one(cs, up_threshold, up_threshold);
show_one(cs, down_threshold, down_threshold);
show_one(cs, ignore_nice_load, ignore_nice);
show_one(cs, freq_step, freq_step);

define_one_global_rw(sampling_rate);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(up_threshold);
define_one_global_rw(down_threshold);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
define_one_global_ro(sampling_rate_min);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group cs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};

/************************** sysfs end ************************/
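
/* Generate the get_cpu_cdbs()/get_cpu_dbs_info_s() helpers used below */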
define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

static struct cs_ops cs_ops = {
	.notifier_block = &cs_cpufreq_notifier_block,
};
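
/*
 * Glue for the common governor code: the callbacks and tunables that
 * cpufreq_governor_dbs() uses to drive this governor.
 */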
static struct dbs_data cs_dbs_data = {
	.governor = GOV_CONSERVATIVE,
	.attr_group = &cs_attr_group,
	.tuners = &cs_tuners,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = cs_dbs_timer,
	.gov_check_cpu = cs_check_cpu,
	.gov_ops = &cs_ops,
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
}
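
/*
 * Not static when conservative is the default governor, so the cpufreq core
 * can reference the symbol directly; otherwise keep it file-local.
 */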
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cs_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	mutex_init(&cs_dbs_data.mutex);

	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");
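
/*
 * When conservative is the default governor, register it early in boot via
 * fs_initcall() so it is available before ordinary module initcalls run.
 */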
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);