commit 4d5709a7b7
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq

* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Don't take semaphore in cpufreq_quick_get()
  [CPUFREQ] Support different families in fid/did to frequency conversion
  [CPUFREQ] cpufreq_stats: misc cpuinit section annotations
  [CPUFREQ] implement !CONFIG_CPU_FREQ stub for cpufreq_unregister_notifier()
  [CPUFREQ] mark hotplug notifier callback as __cpuinit
  [CPUFREQ] Only check for transition latency on problematic governors (kconfig fix)
  [CPUFREQ] allow ondemand and conservative cpufreq governors to be used as default
  [CPUFREQ] move policy's governor initialisation out of low-level drivers into cpufreq core
  [CPUFREQ] Longhaul - Add support for PM133 northbridge
  [CPUFREQ] x86: use num_online_nodes to get physical cpus numbers for
@@ -269,7 +269,6 @@ static int __init imx_cpufreq_driver_init(struct cpufreq_policy *policy)
         return -EINVAL;
 
     policy->cur = policy->min = policy->max = imx_get_speed(0);
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.min_freq = 8000;
     policy->cpuinfo.max_freq = 200000;
     /* Manual states, that PLL stabilizes in two CLK32 periods */
@@ -331,7 +331,6 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
     if (policy->cpu != 0)
         return -EINVAL;
     policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.min_freq = 59000;
     policy->cpuinfo.max_freq = 287000;
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
@@ -108,7 +108,6 @@ static int __init omap_cpu_init(struct cpufreq_policy *policy)
     if (policy->cpu != 0)
         return -EINVAL;
     policy->cur = policy->min = policy->max = omap_getspeed(0);
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
     policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, VERY_HI_RATE) / 1000;
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
@@ -118,8 +118,6 @@ static int __init __bf533_cpu_init(struct cpufreq_policy *policy)
     if (policy->cpu != 0)
         return -EINVAL;
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
     /*Now ,only support one cpu */
     policy->cur = bf533_getfreq(0);
@@ -321,8 +321,6 @@ acpi_cpufreq_cpu_init (
                 data->acpi_data.states[i].transition_latency * 1000;
         }
     }
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
     policy->cur = processor_get_freq(data, policy->cpu);
 
     /* table init */
@@ -107,8 +107,6 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
         pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
     }
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
     /* if DEBUG is enabled set_pmode() measures the latency
      * of a transition */
     policy->cpuinfo.transition_latency = 25000;
@@ -195,8 +195,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
         pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
     }
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
     policy->cpuinfo.transition_latency = get_gizmo_latency();
 
     cur_astate = get_cur_astate(policy->cpu);
@@ -410,7 +410,6 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
     if (policy->cpu != 0)
         return -ENODEV;
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
     policy->cur = cur_freq;
 
@@ -357,7 +357,6 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
 
 static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
     policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
     /* secondary CPUs are tied to the primary one by the
@@ -93,7 +93,6 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
     policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cur = sh_cpufreq_get(policy->cpu);
     policy->min = policy->cpuinfo.min_freq;
     policy->max = policy->cpuinfo.max_freq;
@@ -326,7 +326,6 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
     table[2].index = 5;
     table[3].frequency = CPUFREQ_TABLE_END;
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = 0;
     policy->cur = clock_tick;
 
@@ -646,7 +646,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
             policy->cpuinfo.transition_latency =
                 perf->states[i].transition_latency * 1000;
     }
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 
     data->max_freq = perf->states[0].core_frequency * 1000;
     /* table init */
@@ -363,7 +363,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
     policy->cur = nforce2_get(policy->cpu);
     policy->min = policy->cpuinfo.min_freq;
     policy->max = policy->cpuinfo.max_freq;
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 
     return 0;
 }
@@ -253,7 +253,6 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
         f_table[k].frequency = CPUFREQ_TABLE_END;
     }
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
     policy->cur = fsb * current_multiplier;
 
@@ -219,7 +219,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
     }
 
     /* cpuinfo and default policy values */
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
     policy->cur = elanfreq_get_cpu_frequency(0);
 
@@ -420,7 +420,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
     policy->min = maxfreq / POLICY_MIN_DIV;
     policy->max = maxfreq;
     policy->cur = curfreq;
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.min_freq = maxfreq / max_duration;
     policy->cpuinfo.max_freq = maxfreq;
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
@@ -710,6 +710,10 @@ static int enable_arbiter_disable(void)
     reg = 0x78;
     dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
                          NULL);
+    /* Find PM133/VT8605 host bridge */
+    if (dev == NULL)
+        dev = pci_get_device(PCI_VENDOR_ID_VIA,
+                             PCI_DEVICE_ID_VIA_8605_0, NULL);
     /* Find CLE266 host bridge */
     if (dev == NULL) {
         reg = 0x76;
@@ -918,7 +922,6 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
     if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
         longhaul_setup_voltagescaling();
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = 200000; /* nsec */
     policy->cur = calc_speed(longhaul_get_cpu_mult());
 
@@ -229,7 +229,6 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
     cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
 
     /* cpuinfo and default policy values */
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = 1000000; /* assumed */
     policy->cur = stock_freq;
 
@@ -160,7 +160,6 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
     }
 
     /* cpuinfo and default policy values */
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
     policy->cur = busfreq * max_multiplier;
 
@@ -637,8 +637,6 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
     printk (KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
                 minimum_speed/1000, maximum_speed/1000);
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
     policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency);
 
     policy->cur = powernow_get(0);
@@ -76,7 +76,10 @@ static u32 find_khz_freq_from_fid(u32 fid)
 /* Return a frequency in MHz, given an input fid and did */
 static u32 find_freq_from_fiddid(u32 fid, u32 did)
 {
-    return 100 * (fid + 0x10) >> did;
+    if (current_cpu_data.x86 == 0x10)
+        return 100 * (fid + 0x10) >> did;
+    else
+        return 100 * (fid + 0x8) >> did;
 }
 
 static u32 find_khz_freq_from_fiddid(u32 fid, u32 did)
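
For illustration only (not from the patch): a standalone sketch of the fid/did arithmetic above. The helper name and the sample fid/did values are made up; only the two formulas are taken from the driver.

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as find_freq_from_fiddid(): result is in MHz.
 * "family" stands in for what the kernel reads from current_cpu_data.x86. */
static uint32_t freq_mhz(unsigned int family, uint32_t fid, uint32_t did)
{
    if (family == 0x10)
        return 100 * (fid + 0x10) >> did;
    else
        return 100 * (fid + 0x8) >> did;
}

int main(void)
{
    /* family 0x10: fid 0, did 0 -> 1600 MHz; did 1 halves that to 800 MHz */
    printf("%u %u\n", freq_mhz(0x10, 0, 0), freq_mhz(0x10, 0, 1));
    /* other families start lower: fid 2, did 0 -> 1000 MHz */
    printf("%u\n", freq_mhz(0x11, 2, 0));
    return 0;
}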
@@ -1208,7 +1211,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
     /* run on any CPU again */
     set_cpus_allowed(current, oldmask);
 
-    pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
     if (cpu_family == CPU_HW_PSTATE)
         pol->cpus = cpumask_of_cpu(pol->cpu);
     else
@@ -1325,21 +1327,16 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
 static int __cpuinit powernowk8_init(void)
 {
     unsigned int i, supported_cpus = 0;
-    unsigned int booted_cores = 1;
 
     for_each_online_cpu(i) {
         if (check_supported_cpu(i))
             supported_cpus++;
     }
 
-#ifdef CONFIG_SMP
-    booted_cores = cpu_data[0].booted_cores;
-#endif
-
     if (supported_cpus == num_online_cpus()) {
         printk(KERN_INFO PFX "Found %d %s "
             "processors (%d cpu cores) (" VERSION ")\n",
-            supported_cpus/booted_cores,
+            num_online_nodes(),
             boot_cpu_data.x86_model_id, supported_cpus);
         return cpufreq_register_driver(&cpufreq_amd64_driver);
     }
@@ -111,7 +111,6 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
         return -ENODEV;
 
     /* cpuinfo and default policy values */
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = 1000000; /* 1ms */
     policy->cur = sc520_freq_get_cpu_frequency(0);
 
@@ -393,7 +393,6 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 
     freq = get_cur_freq(policy->cpu);
 
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */
     policy->cur = freq;
 
@@ -348,7 +348,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
         (speed / 1000));
 
     /* cpuinfo and default policy values */
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cur = speed;
 
     result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
@@ -290,7 +290,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
         (speed / 1000));
 
     /* cpuinfo and default policy values */
-    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
     policy->cur = speed;
 
@@ -56,10 +56,6 @@ config CPU_FREQ_STAT_DETAILS
 
       If in doubt, say N.
 
-# Note that it is not currently possible to set the other governors (such as ondemand)
-# as the default, since if they fail to initialise, cpufreq will be
-# left in an undefined state.
-
 choice
     prompt "Default CPUFreq governor"
     default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
@@ -85,6 +81,29 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE
       program shall be able to set the CPU dynamically without having
       to enable the userspace governor manually.
 
+config CPU_FREQ_DEFAULT_GOV_ONDEMAND
+    bool "ondemand"
+    select CPU_FREQ_GOV_ONDEMAND
+    select CPU_FREQ_GOV_PERFORMANCE
+    help
+      Use the CPUFreq governor 'ondemand' as default. This allows
+      you to get a full dynamic frequency capable system by simply
+      loading your cpufreq low-level hardware driver.
+      Be aware that not all cpufreq drivers support the ondemand
+      governor. If unsure have a look at the help section of the
+      driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+    bool "conservative"
+    select CPU_FREQ_GOV_CONSERVATIVE
+    select CPU_FREQ_GOV_PERFORMANCE
+    help
+      Use the CPUFreq governor 'conservative' as default. This allows
+      you to get a full dynamic frequency capable system by simply
+      loading your cpufreq low-level hardware driver.
+      Be aware that not all cpufreq drivers support the conservative
+      governor. If unsure have a look at the help section of the
+      driver. Fallback governor will be the performance governor.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -763,6 +763,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
     init_completion(&policy->kobj_unregister);
     INIT_WORK(&policy->update, handle_update);
 
+    /* Set governor before ->init, so that driver could check it */
+    policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     /* call driver. From then on the cpufreq must be able
      * to accept all calls to ->verify and ->setpolicy for this CPU
      */
@@ -1109,12 +1111,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
     unsigned int ret_freq = 0;
 
     if (policy) {
-        if (unlikely(lock_policy_rwsem_read(cpu)))
-            return ret_freq;
-
         ret_freq = policy->cur;
-
-        unlock_policy_rwsem_read(cpu);
         cpufreq_cpu_put(policy);
     }
 
@@ -1483,6 +1480,31 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 {
     int ret;
 
+    /* Only must be defined when default governor is known to have latency
+       restrictions, like e.g. conservative or ondemand.
+       That this is the case is already ensured in Kconfig
+    */
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
+    struct cpufreq_governor *gov = &cpufreq_gov_performance;
+#else
+    struct cpufreq_governor *gov = NULL;
+#endif
+
+    if (policy->governor->max_transition_latency &&
+        policy->cpuinfo.transition_latency >
+        policy->governor->max_transition_latency) {
+        if (!gov)
+            return -EINVAL;
+        else {
+            printk(KERN_WARNING "%s governor failed, too long"
+                   " transition latency of HW, fallback"
+                   " to %s governor\n",
+                   policy->governor->name,
+                   gov->name);
+            policy->governor = gov;
+        }
+    }
+
     if (!try_module_get(policy->governor->owner))
         return -EINVAL;
 
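
For illustration only (not from the patch): a user-space sketch of the fallback rule added above. Both latencies are in nanoseconds; the helper and the sample driver latencies are invented, while the 10 ms limit matches the TRANSITION_LATENCY_LIMIT that ondemand and conservative now advertise through max_transition_latency.

#include <stdio.h>

#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)  /* 10 ms, in ns */

/* Mirrors the decision in __cpufreq_governor(): if the driver switches
 * frequencies more slowly than the requested governor tolerates, fall
 * back to the performance governor instead of failing. */
static const char *effective_governor(const char *requested,
                                      unsigned long max_latency_ns,
                                      unsigned long driver_latency_ns)
{
    if (max_latency_ns && driver_latency_ns > max_latency_ns)
        return "performance";
    return requested;
}

int main(void)
{
    /* 200 us driver: well under the limit, ondemand is allowed */
    printf("%s\n", effective_governor("ondemand",
                                      TRANSITION_LATENCY_LIMIT, 200000));
    /* 50 ms driver: too slow, the core falls back to performance */
    printf("%s\n", effective_governor("ondemand",
                                      TRANSITION_LATENCY_LIMIT, 50000000));
    return 0;
}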
@@ -1703,7 +1725,7 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-static int cpufreq_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
                     unsigned long action, void *hcpu)
 {
     unsigned int cpu = (unsigned long)hcpu;
@@ -58,7 +58,7 @@ static unsigned int def_sampling_rate;
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER    (1000)
 #define DEF_SAMPLING_DOWN_FACTOR        (1)
 #define MAX_SAMPLING_DOWN_FACTOR        (10)
-#define TRANSITION_LATENCY_LIMIT        (10 * 1000)
+#define TRANSITION_LATENCY_LIMIT        (10 * 1000 * 1000)
 
 static void do_dbs_timer(struct work_struct *work);
 
@@ -466,9 +466,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
             (!policy->cur))
             return -EINVAL;
 
-        if (policy->cpuinfo.transition_latency >
-                (TRANSITION_LATENCY_LIMIT * 1000))
-            return -EINVAL;
         if (this_dbs_info->enable) /* Already enabled */
             break;
 
@@ -551,15 +548,17 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
     return 0;
 }
 
-static struct cpufreq_governor cpufreq_gov_dbs = {
-    .name       = "conservative",
-    .governor   = cpufreq_governor_dbs,
-    .owner      = THIS_MODULE,
+struct cpufreq_governor cpufreq_gov_conservative = {
+    .name           = "conservative",
+    .governor       = cpufreq_governor_dbs,
+    .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+    .owner          = THIS_MODULE,
 };
+EXPORT_SYMBOL(cpufreq_gov_conservative);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-    return cpufreq_register_governor(&cpufreq_gov_dbs);
+    return cpufreq_register_governor(&cpufreq_gov_conservative);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
@@ -567,7 +566,7 @@ static void __exit cpufreq_gov_dbs_exit(void)
     /* Make sure that the scheduled work is indeed not running */
     flush_scheduled_work();
 
-    cpufreq_unregister_governor(&cpufreq_gov_dbs);
+    cpufreq_unregister_governor(&cpufreq_gov_conservative);
 }
 
 
@@ -47,7 +47,7 @@ static unsigned int def_sampling_rate;
             (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE           (500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER    (1000)
-#define TRANSITION_LATENCY_LIMIT        (10 * 1000)
+#define TRANSITION_LATENCY_LIMIT        (10 * 1000 * 1000)
 
 static void do_dbs_timer(struct work_struct *work);
 
@@ -508,12 +508,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         if ((!cpu_online(cpu)) || (!policy->cur))
             return -EINVAL;
 
-        if (policy->cpuinfo.transition_latency >
-                (TRANSITION_LATENCY_LIMIT * 1000)) {
-            printk(KERN_WARNING "ondemand governor failed to load "
-                   "due to too long transition latency\n");
-            return -EINVAL;
-        }
         if (this_dbs_info->enable) /* Already enabled */
             break;
 
@@ -585,11 +579,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
     return 0;
 }
 
-static struct cpufreq_governor cpufreq_gov_dbs = {
-    .name       = "ondemand",
-    .governor   = cpufreq_governor_dbs,
-    .owner      = THIS_MODULE,
+struct cpufreq_governor cpufreq_gov_ondemand = {
+    .name           = "ondemand",
+    .governor       = cpufreq_governor_dbs,
+    .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+    .owner          = THIS_MODULE,
 };
+EXPORT_SYMBOL(cpufreq_gov_ondemand);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
@@ -598,12 +594,12 @@ static int __init cpufreq_gov_dbs_init(void)
         printk(KERN_ERR "Creation of kondemand failed\n");
         return -EFAULT;
     }
-    return cpufreq_register_governor(&cpufreq_gov_dbs);
+    return cpufreq_register_governor(&cpufreq_gov_ondemand);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-    cpufreq_unregister_governor(&cpufreq_gov_dbs);
+    cpufreq_unregister_governor(&cpufreq_gov_ondemand);
     destroy_workqueue(kondemand_wq);
 }
 
@@ -164,8 +164,7 @@ freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
     return -1;
 }
 
-static void
-cpufreq_stats_free_table (unsigned int cpu)
+static void __cpuexit cpufreq_stats_free_table(unsigned int cpu)
 {
     struct cpufreq_stats *stat = cpufreq_stats_table[cpu];
     struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
@@ -305,8 +304,9 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
     return 0;
 }
 
-static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
-                    unsigned long action, void *hcpu)
+static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+                           unsigned long action,
+                           void *hcpu)
 {
     unsigned int cpu = (unsigned long)hcpu;
 
@@ -323,7 +323,7 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
     return NOTIFY_OK;
 }
 
-static struct notifier_block cpufreq_stat_cpu_notifier =
+static struct notifier_block cpufreq_stat_cpu_notifier __cpuinitdata =
 {
     .notifier_call = cpufreq_stat_cpu_callback,
 };
@@ -356,8 +356,7 @@ __init cpufreq_stats_init(void)
 
     register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
     for_each_online_cpu(cpu) {
-        cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
-                CPU_ONLINE, (void *)(long)cpu);
+        cpufreq_update_policy(cpu);
     }
     return 0;
 }
@@ -372,13 +371,12 @@ __exit cpufreq_stats_exit(void)
             CPUFREQ_TRANSITION_NOTIFIER);
     unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
     for_each_online_cpu(cpu) {
-        cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
-                CPU_DEAD, (void *)(long)cpu);
+        cpufreq_stats_free_table(cpu);
     }
 }
 
 MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats"
+MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats "
         "through sysfs filesystem");
 MODULE_LICENSE ("GPL");
 
@@ -32,12 +32,24 @@
  *                     CPUFREQ NOTIFIER INTERFACE                    *
  *********************************************************************/
 
-int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
-int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
-
 #define CPUFREQ_TRANSITION_NOTIFIER (0)
 #define CPUFREQ_POLICY_NOTIFIER     (1)
 
+#ifdef CONFIG_CPU_FREQ
+int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
+int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
+#else       /* CONFIG_CPU_FREQ */
+static inline int cpufreq_register_notifier(struct notifier_block *nb,
+                        unsigned int list)
+{
+    return 0;
+}
+static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
+                        unsigned int list)
+{
+    return 0;
+}
+#endif      /* CONFIG_CPU_FREQ */
 
 /* if (cpufreq_driver->target) exists, the ->governor decides what frequency
  * within the limits is used. If (cpufreq_driver->setpolicy> exists, these
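
For illustration only (not from the patch): a minimal, hypothetical module showing what the new stubs buy callers. With the #ifdef block above, the two calls below compile to inline no-ops returning 0 when CONFIG_CPU_FREQ is disabled, so the caller needs no #ifdef of its own. All names here are invented.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>

static int example_transition(struct notifier_block *nb,
                              unsigned long val, void *data)
{
    struct cpufreq_freqs *freqs = data;

    /* Log completed frequency transitions */
    if (val == CPUFREQ_POSTCHANGE)
        printk(KERN_INFO "cpu%u: %u -> %u kHz\n",
               freqs->cpu, freqs->old, freqs->new);
    return NOTIFY_OK;
}

static struct notifier_block example_nb = {
    .notifier_call = example_transition,
};

static int __init example_init(void)
{
    /* Real registration with CONFIG_CPU_FREQ, harmless stub without it */
    return cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
}

static void __exit example_exit(void)
{
    cpufreq_unregister_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");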
@@ -155,6 +167,9 @@ struct cpufreq_governor {
     char    name[CPUFREQ_NAME_LEN];
     int (*governor) (struct cpufreq_policy *policy,
                  unsigned int event);
+    unsigned int max_transition_latency; /* HW must be able to switch to
+                    next freq faster than this value in nano secs or we
+                    will fallback to performance governor */
     struct list_head    governor_list;
     struct module       *owner;
 };
@@ -279,12 +294,24 @@ static inline unsigned int cpufreq_quick_get(unsigned int cpu)
  *********************************************************************/
 
 
-#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+/*
+  Performance governor is fallback governor if any other gov failed to
+  auto load due latency restrictions
+*/
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
 extern struct cpufreq_governor cpufreq_gov_performance;
-#define CPUFREQ_DEFAULT_GOVERNOR    &cpufreq_gov_performance
+#endif
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+#define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_performance)
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
 extern struct cpufreq_governor cpufreq_gov_userspace;
-#define CPUFREQ_DEFAULT_GOVERNOR    &cpufreq_gov_userspace
+#define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_userspace)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
+extern struct cpufreq_governor cpufreq_gov_ondemand;
+#define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_ondemand)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
+extern struct cpufreq_governor cpufreq_gov_conservative;
+#define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_conservative)
 #endif
 
 