[CPUFREQ] Allow ondemand stepping to be changed by user.
Adds support so that the cpufreq change stepping is no longer fixed at 5% and can be changed dynamically by the user.

Signed-off-by: Alexander Clouter <alex-kernel@digriz.org.uk>
Signed-off-by: Dave Jones <davej@redhat.com>
This commit is contained in:
parent
c11420a616
commit
1206aaac28
@ -79,6 +79,7 @@ struct dbs_tuners {
|
|||||||
unsigned int up_threshold;
|
unsigned int up_threshold;
|
||||||
unsigned int down_threshold;
|
unsigned int down_threshold;
|
||||||
unsigned int ignore_nice;
|
unsigned int ignore_nice;
|
||||||
|
unsigned int freq_step;
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct dbs_tuners dbs_tuners_ins = {
|
static struct dbs_tuners dbs_tuners_ins = {
|
||||||
@ -117,6 +118,7 @@ show_one(sampling_down_factor, sampling_down_factor);
|
|||||||
show_one(up_threshold, up_threshold);
|
show_one(up_threshold, up_threshold);
|
||||||
show_one(down_threshold, down_threshold);
|
show_one(down_threshold, down_threshold);
|
||||||
show_one(ignore_nice, ignore_nice);
|
show_one(ignore_nice, ignore_nice);
|
||||||
|
show_one(freq_step, freq_step);
|
||||||
|
|
||||||
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
|
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
|
||||||
const char *buf, size_t count)
|
const char *buf, size_t count)
|
||||||
@ -235,6 +237,29 @@ static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
|
|||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * store_freq_step - sysfs store handler for the ondemand "freq_step" tunable.
 * @policy: cpufreq policy the attribute belongs to (unused here; the value
 *          is stored in the global dbs_tuners_ins)
 * @buf:    user-supplied string containing the new value
 * @count:  length of @buf
 *
 * Parses an unsigned integer from @buf — the frequency step expressed as a
 * percentage of policy->max — clamps it to 100, and stores it into
 * dbs_tuners_ins.freq_step while holding dbs_sem.
 *
 * Returns @count on success, or -EINVAL if @buf does not parse as a single
 * unsigned integer.
 */
static ssize_t store_freq_step(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf (buf, "%u", &input);

	if ( ret != 1 )
		return -EINVAL;

	/* freq_step is a percentage, so anything above 100 is clamped
	 * rather than rejected. */
	if ( input > 100 )
		input = 100;

	/* no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :) */
	down(&dbs_sem);
	dbs_tuners_ins.freq_step = input;
	up(&dbs_sem);

	return count;
}
|
||||||
|
|
||||||
#define define_one_rw(_name) \
|
#define define_one_rw(_name) \
|
||||||
static struct freq_attr _name = \
|
static struct freq_attr _name = \
|
||||||
__ATTR(_name, 0644, show_##_name, store_##_name)
|
__ATTR(_name, 0644, show_##_name, store_##_name)
|
||||||
@ -244,6 +269,7 @@ define_one_rw(sampling_down_factor);
|
|||||||
define_one_rw(up_threshold);
|
define_one_rw(up_threshold);
|
||||||
define_one_rw(down_threshold);
|
define_one_rw(down_threshold);
|
||||||
define_one_rw(ignore_nice);
|
define_one_rw(ignore_nice);
|
||||||
|
define_one_rw(freq_step);
|
||||||
|
|
||||||
static struct attribute * dbs_attributes[] = {
|
static struct attribute * dbs_attributes[] = {
|
||||||
&sampling_rate_max.attr,
|
&sampling_rate_max.attr,
|
||||||
@ -253,6 +279,7 @@ static struct attribute * dbs_attributes[] = {
|
|||||||
&up_threshold.attr,
|
&up_threshold.attr,
|
||||||
&down_threshold.attr,
|
&down_threshold.attr,
|
||||||
&ignore_nice.attr,
|
&ignore_nice.attr,
|
||||||
|
&freq_step.attr,
|
||||||
NULL
|
NULL
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -291,7 +318,7 @@ static void dbs_check_cpu(int cpu)
|
|||||||
*
|
*
|
||||||
* Any frequency increase takes it to the maximum frequency.
|
* Any frequency increase takes it to the maximum frequency.
|
||||||
* Frequency reduction happens at minimum steps of
|
* Frequency reduction happens at minimum steps of
|
||||||
* 5% of max_frequency
|
* 5% (default) of max_frequency
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/* Check for frequency increase */
|
/* Check for frequency increase */
|
||||||
@ -390,18 +417,20 @@ static void dbs_check_cpu(int cpu)
|
|||||||
usecs_to_jiffies(freq_down_sampling_rate);
|
usecs_to_jiffies(freq_down_sampling_rate);
|
||||||
|
|
||||||
if (idle_ticks > down_idle_ticks ) {
|
if (idle_ticks > down_idle_ticks ) {
|
||||||
/* if we are already at the lowest speed then break out early */
|
/* if we are already at the lowest speed then break out early
|
||||||
if (policy->cur == policy->min)
|
* or if we 'cannot' reduce the speed as the user might want
|
||||||
|
* freq_step to be zero */
|
||||||
|
if (policy->cur == policy->min || dbs_tuners_ins.freq_step == 0)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
freq_down_step = (5 * policy->max) / 100;
|
freq_down_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
|
||||||
|
|
||||||
/* max freq cannot be less than 100. But who knows.... */
|
/* max freq cannot be less than 100. But who knows.... */
|
||||||
if (unlikely(freq_down_step == 0))
|
if (unlikely(freq_down_step == 0))
|
||||||
freq_down_step = 5;
|
freq_down_step = 5;
|
||||||
|
|
||||||
__cpufreq_driver_target(policy,
|
__cpufreq_driver_target(policy,
|
||||||
policy->cur - freq_down_step,
|
policy->cur - freq_down_step,
|
||||||
CPUFREQ_RELATION_H);
|
CPUFREQ_RELATION_H);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -486,6 +515,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
|||||||
DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
|
DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
|
||||||
dbs_tuners_ins.sampling_rate = def_sampling_rate;
|
dbs_tuners_ins.sampling_rate = def_sampling_rate;
|
||||||
dbs_tuners_ins.ignore_nice = 0;
|
dbs_tuners_ins.ignore_nice = 0;
|
||||||
|
dbs_tuners_ins.freq_step = 5;
|
||||||
|
|
||||||
dbs_timer_init();
|
dbs_timer_init();
|
||||||
}
|
}
|
||||||
|
Loading…
x
Reference in New Issue
Block a user