Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Remove pointless printk from p4-clockmod.
[CPUFREQ] Fix section mismatch for powernow_cpu_init in powernow-k7.c
[CPUFREQ] Fix section mismatch for longhaul_cpu_init.
[CPUFREQ] Fix section mismatch for longrun_cpu_init.
[CPUFREQ] powernow-k8: Fix misleading variable naming
[CPUFREQ] Convert pci_table entries to PCI_VDEVICE (if PCI_ANY_ID is used)
[CPUFREQ] arch/x86/kernel/cpu/cpufreq: use for_each_pci_dev()
[CPUFREQ] fix brace coding style issue.
[CPUFREQ] x86 cpufreq: Make trace_power_frequency cpufreq driver independent
[CPUFREQ] acpi-cpufreq: Fix CPU_ANY CPUFREQ_{PRE,POST}CHANGE notification
[CPUFREQ] ondemand: don't synchronize sample rate unless multiple cpus present
[CPUFREQ] unexport (un)lock_policy_rwsem* functions
[CPUFREQ] ondemand: Refactor frequency increase code
[CPUFREQ] powernow-k8: On load failure, remind the user to enable support in BIOS setup
[CPUFREQ] powernow-k8: Limit Pstate transition latency check
[CPUFREQ] Fix PCC driver error path
[CPUFREQ] fix double freeing in error path of pcc-cpufreq
[CPUFREQ] pcc driver should check for pcch method before calling _OSC
[CPUFREQ] fix memory leak in cpufreq_add_dev
[CPUFREQ] revert "[CPUFREQ] remove rwsem lock from CPUFREQ_GOV_STOP call (second call site)"
Manually fix up non-data merge conflict introduced by new calling
conventions for trace_power_start() in commit 6f4f2723d0 ("x86
cpufreq: Make trace_power_frequency cpufreq driver independent"), which
didn't update the intel_idle native hardware cpuidle driver.
commit 8d91530c5f
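
For context on the conflict: the power trace events in this series gained a CPU id argument, so any caller still using the old two-argument form (intel_idle was one) no longer matched the event prototype. The snippet below is only a condensed before/after pulled from the hunks that follow, not additional code from the merge:

    /* before this series: power events carry no CPU information */
    TP_PROTO(unsigned int type, unsigned int state)
    trace_power_start(POWER_CSTATE, (eax >> 4) + 1);

    /* after: every power event reports the CPU it was emitted on */
    TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id)
    trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
    trace_power_end(smp_processor_id());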
@@ -377,16 +377,6 @@ Who: Eric Paris <eparis@redhat.com>
 
 ----------------------------
 
-What:   lock_policy_rwsem_* and unlock_policy_rwsem_* will not be
-        exported interface anymore.
-When:   2.6.33
-Why:    cpu_policy_rwsem has a new cleaner definition making it local to
-        cpufreq core and contained inside cpufreq.c. Other dependent
-        drivers should not use it in order to safely avoid lockdep issues.
-Who:    Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
-
-----------------------------
-
 What:   sound-slot/service-* module aliases and related clutters in
         sound/sound_core.c
 When:   August 2010
@@ -34,7 +34,6 @@
 #include <linux/compiler.h>
 #include <linux/dmi.h>
 #include <linux/slab.h>
-#include <trace/events/power.h>
 
 #include <linux/acpi.h>
 #include <linux/io.h>
@@ -324,8 +323,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                 }
         }
 
-        trace_power_frequency(POWER_PSTATE, data->freq_table[next_state].frequency);
-
         switch (data->cpu_feature) {
         case SYSTEM_INTEL_MSR_CAPABLE:
                 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
@@ -351,7 +348,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
         freqs.old = perf->states[perf->state].core_frequency * 1000;
         freqs.new = data->freq_table[next_state].frequency;
-        for_each_cpu(i, cmd.mask) {
+        for_each_cpu(i, policy->cpus) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -367,7 +364,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                 }
         }
 
-        for_each_cpu(i, cmd.mask) {
+        for_each_cpu(i, policy->cpus) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -169,12 +169,9 @@ static int gx_freq_mult[16] = {
  *      Low Level chipset interface                             *
  ****************************************************************/
 static struct pci_device_id gx_chipset_tbl[] __initdata = {
-        { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
-                PCI_ANY_ID, PCI_ANY_ID },
-        { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520,
-                PCI_ANY_ID, PCI_ANY_ID },
-        { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510,
-                PCI_ANY_ID, PCI_ANY_ID },
+        { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY), },
+        { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
+        { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
         { 0, },
 };
 
@@ -199,7 +196,7 @@ static __init struct pci_dev *gx_detect_chipset(void)
         }
 
         /* detect which companion chip is used */
-        while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) {
+        for_each_pci_dev(gx_pci) {
                 if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
                         return gx_pci;
         }
@@ -426,7 +426,7 @@ static int guess_fsb(int mult)
 }
 
 
-static int __init longhaul_get_ranges(void)
+static int __cpuinit longhaul_get_ranges(void)
 {
         unsigned int i, j, k = 0;
         unsigned int ratio;
@@ -530,7 +530,7 @@ static int __init longhaul_get_ranges(void)
 }
 
 
-static void __init longhaul_setup_voltagescaling(void)
+static void __cpuinit longhaul_setup_voltagescaling(void)
 {
         union msr_longhaul longhaul;
         struct mV_pos minvid, maxvid, vid;
@@ -784,7 +784,7 @@ static int longhaul_setup_southbridge(void)
         return 0;
 }
 
-static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
+static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy)
 {
         struct cpuinfo_x86 *c = &cpu_data(0);
         char *cpuname = NULL;
@@ -56,7 +56,7 @@ union msr_longhaul {
 /*
  * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
  */
-static const int __initdata samuel1_mults[16] = {
+static const int __cpuinitdata samuel1_mults[16] = {
         -1, /* 0000 -> RESERVED */
         30, /* 0001 ->  3.0x */
         40, /* 0010 ->  4.0x */
@@ -75,7 +75,7 @@ static const int __initdata samuel1_mults[16] = {
         -1, /* 1111 -> RESERVED */
 };
 
-static const int __initdata samuel1_eblcr[16] = {
+static const int __cpuinitdata samuel1_eblcr[16] = {
         50, /* 0000 -> RESERVED */
         30, /* 0001 ->  3.0x */
         40, /* 0010 ->  4.0x */
@@ -97,7 +97,7 @@ static const int __initdata samuel1_eblcr[16] = {
 /*
  * VIA C3 Samuel2 Stepping 1->15
  */
-static const int __initdata samuel2_eblcr[16] = {
+static const int __cpuinitdata samuel2_eblcr[16] = {
         50, /* 0000 ->  5.0x */
         30, /* 0001 ->  3.0x */
         40, /* 0010 ->  4.0x */
@@ -119,7 +119,7 @@ static const int __initdata samuel2_eblcr[16] = {
 /*
  * VIA C3 Ezra
  */
-static const int __initdata ezra_mults[16] = {
+static const int __cpuinitdata ezra_mults[16] = {
         100, /* 0000 -> 10.0x */
         30, /* 0001 ->  3.0x */
         40, /* 0010 ->  4.0x */
@@ -138,7 +138,7 @@ static const int __initdata ezra_mults[16] = {
         120, /* 1111 -> 12.0x */
 };
 
-static const int __initdata ezra_eblcr[16] = {
+static const int __cpuinitdata ezra_eblcr[16] = {
         50, /* 0000 ->  5.0x */
         30, /* 0001 ->  3.0x */
         40, /* 0010 ->  4.0x */
@@ -160,7 +160,7 @@ static const int __initdata ezra_eblcr[16] = {
 /*
  * VIA C3 (Ezra-T) [C5M].
  */
-static const int __initdata ezrat_mults[32] = {
+static const int __cpuinitdata ezrat_mults[32] = {
         100, /* 0000 -> 10.0x */
         30, /* 0001 ->  3.0x */
         40, /* 0010 ->  4.0x */
@@ -196,7 +196,7 @@ static const int __initdata ezrat_mults[32] = {
         -1, /* 1111 -> RESERVED (12.0x) */
 };
 
-static const int __initdata ezrat_eblcr[32] = {
+static const int __cpuinitdata ezrat_eblcr[32] = {
         50, /* 0000 ->  5.0x */
         30, /* 0001 ->  3.0x */
         40, /* 0010 ->  4.0x */
@@ -235,7 +235,7 @@ static const int __initdata ezrat_eblcr[32] = {
 /*
  * VIA C3 Nehemiah */
 
-static const int __initdata nehemiah_mults[32] = {
+static const int __cpuinitdata nehemiah_mults[32] = {
         100, /* 0000 -> 10.0x */
         -1, /* 0001 -> 16.0x */
         40, /* 0010 ->  4.0x */
@@ -270,7 +270,7 @@ static const int __initdata nehemiah_mults[32] = {
         -1, /* 1111 -> 12.0x */
 };
 
-static const int __initdata nehemiah_eblcr[32] = {
+static const int __cpuinitdata nehemiah_eblcr[32] = {
         50, /* 0000 ->  5.0x */
         160, /* 0001 -> 16.0x */
         40, /* 0010 ->  4.0x */
@@ -315,7 +315,7 @@ struct mV_pos {
         unsigned short pos;
 };
 
-static const struct mV_pos __initdata vrm85_mV[32] = {
+static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
         {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
         {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
         {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
@@ -326,14 +326,14 @@ static const struct mV_pos __initdata vrm85_mV[32] = {
         {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
 };
 
-static const unsigned char __initdata mV_vrm85[32] = {
+static const unsigned char __cpuinitdata mV_vrm85[32] = {
         0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
         0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
         0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
         0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
 };
 
-static const struct mV_pos __initdata mobilevrm_mV[32] = {
+static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
         {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
         {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
         {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
@@ -344,7 +344,7 @@ static const struct mV_pos __initdata mobilevrm_mV[32] = {
         {675, 3}, {650, 2}, {625, 1}, {600, 0}
 };
 
-static const unsigned char __initdata mV_mobilevrm[32] = {
+static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
         0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
         0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
         0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
@@ -165,8 +165,8 @@ static unsigned int longrun_get(unsigned int cpu)
  * TMTA rules:
  * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
  */
-static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
-                                                   unsigned int *high_freq)
+static unsigned int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
+                                                      unsigned int *high_freq)
 {
         u32 msr_lo, msr_hi;
         u32 save_lo, save_hi;
@@ -258,7 +258,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
 }
 
 
-static int __init longrun_cpu_init(struct cpufreq_policy *policy)
+static int __cpuinit longrun_cpu_init(struct cpufreq_policy *policy)
 {
         int result = 0;
 
@@ -178,13 +178,8 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
                 }
         }
 
-        if (c->x86 != 0xF) {
-                if (!cpu_has(c, X86_FEATURE_EST))
-                        printk(KERN_WARNING PFX "Unknown CPU. "
-                                "Please send an e-mail to "
-                                "<cpufreq@vger.kernel.org>\n");
+        if (c->x86 != 0xF)
                 return 0;
-        }
 
         /* on P-4s, the TSC runs with constant frequency independent whether
          * throttling is active or not. */
@@ -569,7 +569,7 @@ static int powernow_verify(struct cpufreq_policy *policy)
  * We will then get the same kind of behaviour already tested under
  * the "well-known" other OS.
  */
-static int __init fixup_sgtc(void)
+static int __cpuinit fixup_sgtc(void)
 {
         unsigned int sgtc;
         unsigned int m;
@@ -603,7 +603,7 @@ static unsigned int powernow_get(unsigned int cpu)
 }
 
 
-static int __init acer_cpufreq_pst(const struct dmi_system_id *d)
+static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d)
 {
         printk(KERN_WARNING PFX
                 "%s laptop with broken PST tables in BIOS detected.\n",
@@ -621,7 +621,7 @@ static int __init acer_cpufreq_pst(const struct dmi_system_id *d)
  * A BIOS update is all that can save them.
  * Mention this, and disable cpufreq.
  */
-static struct dmi_system_id __initdata powernow_dmi_table[] = {
+static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = {
         {
                 .callback = acer_cpufreq_pst,
                 .ident = "Acer Aspire",
@@ -633,7 +633,7 @@ static struct dmi_system_id __initdata powernow_dmi_table[] = {
         { }
 };
 
-static int __init powernow_cpu_init(struct cpufreq_policy *policy)
+static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy)
 {
         union msr_fidvidstatus fidvidstatus;
         int result;
@@ -806,6 +806,8 @@ static int find_psb_table(struct powernow_k8_data *data)
          * www.amd.com
          */
         printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n");
+        printk(KERN_ERR PFX "Make sure that your BIOS is up to date"
+                " and Cool'N'Quiet support is enabled in BIOS setup\n");
         return -ENODEV;
 }
 
@@ -910,8 +912,8 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 {
         int i;
         u32 hi = 0, lo = 0;
-        rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
-        data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
+        rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
+        data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
 
         for (i = 0; i < data->acpi_data.state_count; i++) {
                 u32 index;
@@ -372,7 +372,7 @@ static inline int hlt_use_halt(void)
 void default_idle(void)
 {
         if (hlt_use_halt()) {
-                trace_power_start(POWER_CSTATE, 1);
+                trace_power_start(POWER_CSTATE, 1, smp_processor_id());
                 current_thread_info()->status &= ~TS_POLLING;
                 /*
                  * TS_POLLING-cleared state must be visible before we
@@ -442,7 +442,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-        trace_power_start(POWER_CSTATE, (ax>>4)+1);
+        trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
         if (!need_resched()) {
                 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                         clflush((void *)&current_thread_info()->flags);
@@ -458,7 +458,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 static void mwait_idle(void)
 {
         if (!need_resched()) {
-                trace_power_start(POWER_CSTATE, 1);
+                trace_power_start(POWER_CSTATE, 1, smp_processor_id());
                 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                         clflush((void *)&current_thread_info()->flags);
 
@@ -479,7 +479,7 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
-        trace_power_start(POWER_CSTATE, 0);
+        trace_power_start(POWER_CSTATE, 0, smp_processor_id());
         local_irq_enable();
         while (!need_resched())
                 cpu_relax();
@@ -29,6 +29,8 @@
 #include <linux/completion.h>
 #include <linux/mutex.h>
 
+#include <trace/events/power.h>
+
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
                                                 "cpufreq-core", msg)
 
@@ -68,7 +70,7 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)                                    \
-int lock_policy_rwsem_##mode                                            \
+static int lock_policy_rwsem_##mode                                     \
 (int cpu)                                                               \
 {                                                                       \
         int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
@@ -83,26 +85,22 @@ int lock_policy_rwsem_##mode                                            \
 }
 
 lock_policy_rwsem(read, cpu);
-EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
 
 lock_policy_rwsem(write, cpu);
-EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
 
-void unlock_policy_rwsem_read(int cpu)
+static void unlock_policy_rwsem_read(int cpu)
 {
         int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
         BUG_ON(policy_cpu == -1);
         up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
-EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
 
-void unlock_policy_rwsem_write(int cpu)
+static void unlock_policy_rwsem_write(int cpu)
 {
         int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
         BUG_ON(policy_cpu == -1);
         up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
-EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
 
 
 /* internal prototypes */
@@ -354,6 +352,9 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 
         case CPUFREQ_POSTCHANGE:
                 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
+                dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
+                        (unsigned long)freqs->cpu);
+                trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                 CPUFREQ_POSTCHANGE, freqs);
                 if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -1875,8 +1876,8 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
         return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata cpufreq_cpu_notifier =
-{
+static struct notifier_block __refdata cpufreq_cpu_notifier = {
         .notifier_call = cpufreq_cpu_callback,
 };
 
@@ -459,6 +459,17 @@ static struct attribute_group dbs_attr_group_old = {
 
 /************************** sysfs end ************************/
 
+static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+{
+        if (dbs_tuners_ins.powersave_bias)
+                freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+        else if (p->cur == p->max)
+                return;
+
+        __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
+                        CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
         unsigned int max_load_freq;
@@ -551,19 +562,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
         /* Check for frequency increase */
         if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
-                /* if we are already at full speed then break out early */
-                if (!dbs_tuners_ins.powersave_bias) {
-                        if (policy->cur == policy->max)
-                                return;
-
-                        __cpufreq_driver_target(policy, policy->max,
-                                CPUFREQ_RELATION_H);
-                } else {
-                        int freq = powersave_bias_target(policy, policy->max,
-                                        CPUFREQ_RELATION_H);
-                        __cpufreq_driver_target(policy, freq,
-                                CPUFREQ_RELATION_L);
-                }
+                dbs_freq_increase(policy, policy->max);
                 return;
         }
 
@@ -610,7 +609,9 @@ static void do_dbs_timer(struct work_struct *work)
         /* We want all CPUs to do sampling nearly on same jiffy */
         int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-        delay -= jiffies % delay;
+
+        if (num_online_cpus() > 1)
+                delay -= jiffies % delay;
 
         mutex_lock(&dbs_info->timer_mutex);
 
         /* Common NORMAL_SAMPLE setup */
@@ -635,7 +636,9 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 {
         /* We want all CPUs to do sampling nearly on same jiffy */
         int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-        delay -= jiffies % delay;
+
+        if (num_online_cpus() > 1)
+                delay -= jiffies % delay;
 
         dbs_info->sample_type = DBS_NORMAL_SAMPLE;
         INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
@@ -95,7 +95,7 @@ static void cpuidle_idle_call(void)
         /* give the governor an opportunity to reflect on the outcome */
         if (cpuidle_curr_governor->reflect)
                 cpuidle_curr_governor->reflect(dev);
-        trace_power_end(0);
+        trace_power_end(smp_processor_id());
 }
 
 /**
@@ -231,7 +231,7 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
 
         stop_critical_timings();
 #ifndef MODULE
-        trace_power_start(POWER_CSTATE, (eax >> 4) + 1);
+        trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
 #endif
         if (!need_resched()) {
 
@@ -196,11 +196,6 @@ extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 
-int lock_policy_rwsem_read(int cpu);
-int lock_policy_rwsem_write(int cpu);
-void unlock_policy_rwsem_read(int cpu);
-void unlock_policy_rwsem_write(int cpu);
-
 
 /*********************************************************************
  *                      CPUFREQ DRIVER INTERFACE                     *
@@ -18,52 +18,55 @@ enum {
 
 DECLARE_EVENT_CLASS(power,
 
-        TP_PROTO(unsigned int type, unsigned int state),
+        TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
 
-        TP_ARGS(type, state),
+        TP_ARGS(type, state, cpu_id),
 
         TP_STRUCT__entry(
                 __field(        u64,            type            )
                 __field(        u64,            state           )
+                __field(        u64,            cpu_id          )
         ),
 
         TP_fast_assign(
                 __entry->type = type;
                 __entry->state = state;
+                __entry->cpu_id = cpu_id;
         ),
 
-        TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state)
+        TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type,
+                (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
 );
 
 DEFINE_EVENT(power, power_start,
 
-        TP_PROTO(unsigned int type, unsigned int state),
+        TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
 
-        TP_ARGS(type, state)
+        TP_ARGS(type, state, cpu_id)
 );
 
 DEFINE_EVENT(power, power_frequency,
 
-        TP_PROTO(unsigned int type, unsigned int state),
+        TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
 
-        TP_ARGS(type, state)
+        TP_ARGS(type, state, cpu_id)
 );
 
 TRACE_EVENT(power_end,
 
-        TP_PROTO(int dummy),
+        TP_PROTO(unsigned int cpu_id),
 
-        TP_ARGS(dummy),
+        TP_ARGS(cpu_id),
 
         TP_STRUCT__entry(
-                __field(        u64,            dummy           )
+                __field(        u64,            cpu_id          )
         ),
 
         TP_fast_assign(
-                __entry->dummy = 0xffff;
+                __entry->cpu_id = cpu_id;
         ),
 
-        TP_printk("dummy=%lu", (unsigned long)__entry->dummy)
+        TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
 
 );
 
@@ -300,8 +300,9 @@ struct trace_entry {
 
 struct power_entry {
         struct trace_entry te;
-        s64     type;
-        s64     value;
+        u64     type;
+        u64     value;
+        u64     cpu_id;
 };
 
 #define TASK_COMM_LEN 16
@@ -498,13 +499,13 @@ static int process_sample_event(event_t *event, struct perf_session *session)
                 return 0;
 
         if (strcmp(event_str, "power:power_start") == 0)
-                c_state_start(data.cpu, data.time, pe->value);
+                c_state_start(pe->cpu_id, data.time, pe->value);
 
         if (strcmp(event_str, "power:power_end") == 0)
-                c_state_end(data.cpu, data.time);
+                c_state_end(pe->cpu_id, data.time);
 
         if (strcmp(event_str, "power:power_frequency") == 0)
-                p_state_change(data.cpu, data.time, pe->value);
+                p_state_change(pe->cpu_id, data.time, pe->value);
 
         if (strcmp(event_str, "sched:sched_wakeup") == 0)
                 sched_wakeup(data.cpu, data.time, data.pid, te);