Merge branch 'pm-cpufreq'

* pm-cpufreq: (37 commits)
  cpufreq: dt: allow driver to boot automatically
  intel_pstate: Fix overflow in busy_scaled due to long delay
  cpufreq: qoriq: optimize the CPU frequency switching time
  cpufreq: gx-suspmod: Fix two typos in two comments
  cpufreq: nforce2: Fix typo in comment to function nforce2_init()
  cpufreq: governor: Serialize governor callbacks
  cpufreq: governor: split cpufreq_governor_dbs()
  cpufreq: governor: register notifier from cs_init()
  cpufreq: Remove cpufreq_update_policy()
  cpufreq: Restart governor as soon as possible
  cpufreq: Call cpufreq_policy_put_kobj() from cpufreq_policy_free()
  cpufreq: Initialize policy->kobj while allocating policy
  cpufreq: Stop migrating sysfs files on hotplug
  cpufreq: Don't allow updating inactive policies from sysfs
  intel_pstate: Force setting target pstate when required
  intel_pstate: change some inconsistent debug information
  cpufreq: Track cpu managing sysfs kobjects separately
  cpufreq: Fix for typos in two comments
  cpufreq: Mark policy->governor = NULL for inactive policies
  cpufreq: Manage governor usage history with 'policy->last_governor'
  ...
Rafael J. Wysocki 2015-06-19 01:17:50 +02:00
commit 8ced6789da
16 changed files with 699 additions and 532 deletions

View File

@@ -196,8 +196,6 @@ affected_cpus : List of Online CPUs that require software
 related_cpus : List of Online + Offline CPUs that need software
 coordination of frequency.
-scaling_driver : Hardware driver for cpufreq.
-
 scaling_cur_freq : Current frequency of the CPU as determined by
 the governor and cpufreq core, in KHz. This is
 the frequency the kernel thinks the CPU runs

View File

@@ -5,7 +5,7 @@
 # big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
     tristate "Generic ARM big LITTLE CPUfreq driver"
-    depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+    depends on (ARM_CPU_TOPOLOGY || ARM64) && HAVE_CLK
     select PM_OPP
     help
       This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.

View File

@@ -31,7 +31,6 @@
 #include <linux/slab.h>
 #include <linux/topology.h>
 #include <linux/types.h>
-#include <asm/bL_switcher.h>
 #include "arm_big_little.h"

@@ -41,12 +40,16 @@
 #define MAX_CLUSTERS 2

 #ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
 static bool bL_switching_enabled;
 #define is_bL_switching_enabled() bL_switching_enabled
 #define set_switching_enabled(x) (bL_switching_enabled = (x))
 #else
 #define is_bL_switching_enabled() false
 #define set_switching_enabled(x) do { } while (0)
+#define bL_switch_request(...) do { } while (0)
+#define bL_switcher_put_enabled() do { } while (0)
+#define bL_switcher_get_enabled() do { } while (0)
 #endif

 #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)

@@ -186,6 +189,15 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
         mutex_unlock(&cluster_lock[old_cluster]);
     }

+    /*
+     * FIXME: clk_set_rate has to handle the case where clk_change_rate
+     * can fail due to hardware or firmware issues. Until the clk core
+     * layer is fixed, we can check here. In most of the cases we will
+     * be reading only the cached value anyway. This needs to be removed
+     * once clk core is fixed.
+     */
+    if (bL_cpufreq_get_rate(cpu) != new_rate)
+        return -EIO;
+
     return 0;
 }

@@ -322,7 +334,6 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
 static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
 {
     u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
-    char name[14] = "cpu-cluster.";
     int ret;

     if (freq_table[cluster])

@@ -342,8 +353,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
         goto free_opp_table;
     }

-    name[12] = cluster + '0';
-    clk[cluster] = clk_get(cpu_dev, name);
+    clk[cluster] = clk_get(cpu_dev, NULL);
     if (!IS_ERR(clk[cluster])) {
         dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
             __func__, clk[cluster], freq_table[cluster],

@@ -506,6 +516,7 @@ static struct cpufreq_driver bL_cpufreq_driver = {
     .attr = cpufreq_generic_attr,
 };

+#ifdef CONFIG_BL_SWITCHER
 static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
                     unsigned long action, void *_arg)
 {

@@ -538,6 +549,20 @@ static struct notifier_block bL_switcher_notifier = {
     .notifier_call = bL_cpufreq_switcher_notifier,
 };

+static int __bLs_register_notifier(void)
+{
+    return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+    return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
+
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
 {
     int ret, i;

@@ -555,8 +580,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)

     arm_bL_ops = ops;

-    ret = bL_switcher_get_enabled();
-    set_switching_enabled(ret);
+    set_switching_enabled(bL_switcher_get_enabled());

     for (i = 0; i < MAX_CLUSTERS; i++)
         mutex_init(&cluster_lock[i]);

@@ -567,7 +591,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
             __func__, ops->name, ret);
         arm_bL_ops = NULL;
     } else {
-        ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+        ret = __bLs_register_notifier();
         if (ret) {
             cpufreq_unregister_driver(&bL_cpufreq_driver);
             arm_bL_ops = NULL;

@@ -591,7 +615,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
     }

     bL_switcher_get_enabled();
-    bL_switcher_unregister_notifier(&bL_switcher_notifier);
+    __bLs_unregister_notifier();
     cpufreq_unregister_driver(&bL_cpufreq_driver);
     bL_switcher_put_enabled();
     pr_info("%s: Un-registered platform driver: %s\n", __func__,
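The #else branch above is the point of the patch: with CONFIG_BL_SWITCHER disabled, every switcher entry point collapses into a no-op stub, so bL_cpufreq_register()/unregister() can call them unconditionally. A minimal standalone sketch of the same idiom (names here are illustrative, not from the kernel):

#include <stdio.h>

/* Build with -DHAVE_SWITCHER to compile the real hook. */
#ifdef HAVE_SWITCHER
static int switcher_register_notifier(void)
{
    printf("switcher notifier registered\n");
    return 0;
}
#else
/* No-op stub: the call site stays free of #ifdef clutter. */
static inline int switcher_register_notifier(void) { return 0; }
#endif

int main(void)
{
    /* Identical call either way; the stub compiles to nothing. */
    return switcher_register_notifier();
}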

View File

@@ -416,6 +416,7 @@ static struct platform_driver dt_cpufreq_platdrv = {
 };
 module_platform_driver(dt_cpufreq_platdrv);

+MODULE_ALIAS("platform:cpufreq-dt");
 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
 MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
 MODULE_DESCRIPTION("Generic cpufreq driver");
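The new MODULE_ALIAS line is what makes the driver load automatically ("cpufreq: dt: allow driver to boot automatically"): registering a platform device named "cpufreq-dt" emits a uevent carrying MODALIAS=platform:cpufreq-dt, which udev/modprobe resolve to this module. A hedged sketch of the registering side, as platform setup code might do it (error handling trimmed; not part of this patch):

#include <linux/err.h>
#include <linux/platform_device.h>

static int __init register_cpufreq_dt_device(void)
{
    struct platform_device *pdev;

    /* The device name must match the "platform:cpufreq-dt" alias. */
    pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
    return PTR_ERR_OR_ZERO(pdev);
}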

View File

@@ -414,7 +414,7 @@ static int nforce2_detect_chipset(void)
  * nforce2_init - initializes the nForce2 CPUFreq driver
  *
  * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported
- * devices, -EINVAL on problems during initiatization, and zero on
+ * devices, -EINVAL on problems during initialization, and zero on
  * success.
  */
 static int __init nforce2_init(void)

View File

@@ -31,10 +31,62 @@
 #include <linux/tick.h>
 #include <trace/events/power.h>

-/* Macros to iterate over lists */
-/* Iterate over online CPUs policies */
 static LIST_HEAD(cpufreq_policy_list);
-#define for_each_policy(__policy) \
+
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+    return cpumask_empty(policy->cpus);
+}
+
+static bool suitable_policy(struct cpufreq_policy *policy, bool active)
+{
+    return active == !policy_is_inactive(policy);
+}
+
+/* Finds Next Active/Inactive policy */
+static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
+                      bool active)
+{
+    do {
+        policy = list_next_entry(policy, policy_list);
+
+        /* No more policies in the list */
+        if (&policy->policy_list == &cpufreq_policy_list)
+            return NULL;
+    } while (!suitable_policy(policy, active));
+
+    return policy;
+}
+
+static struct cpufreq_policy *first_policy(bool active)
+{
+    struct cpufreq_policy *policy;
+
+    /* No policies in the list */
+    if (list_empty(&cpufreq_policy_list))
+        return NULL;
+
+    policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
+                  policy_list);
+
+    if (!suitable_policy(policy, active))
+        policy = next_policy(policy, active);
+
+    return policy;
+}
+
+/* Macros to iterate over CPU policies */
+#define for_each_suitable_policy(__policy, __active) \
+    for (__policy = first_policy(__active); \
+         __policy; \
+         __policy = next_policy(__policy, __active))
+
+#define for_each_active_policy(__policy) \
+    for_each_suitable_policy(__policy, true)
+
+#define for_each_inactive_policy(__policy) \
+    for_each_suitable_policy(__policy, false)
+
+#define for_each_policy(__policy) \
     list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

 /* Iterate over governors */
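The helpers above turn "next policy whose ->cpus mask is (non-)empty" into plain iterator macros. An illustrative fragment (not part of the patch) of how the active variant then reads at a call site, mirroring the cpufreq_suspend() hunk further down:

    struct cpufreq_policy *policy;

    /* Walk only policies that still manage at least one online CPU. */
    for_each_active_policy(policy) {
        if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
            pr_err("%s: Failed to stop governor for policy: %p\n",
                   __func__, policy);
    }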
@@ -49,13 +101,9 @@ static LIST_HEAD(cpufreq_governor_list);
  */
 static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
-static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
 DEFINE_MUTEX(cpufreq_governor_lock);

-/* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-
 /* Flag to suspend/resume CPUFreq governors */
 static bool cpufreq_suspended;

@@ -178,7 +226,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
     policy->cpuinfo.transition_latency = transition_latency;

     /*
-     * The driver only supports the SMP configuartion where all processors
+     * The driver only supports the SMP configuration where all processors
      * share the clock and voltage and clock.
      */
     cpumask_setall(policy->cpus);

@@ -187,10 +235,18 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_init);

+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+    struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+    return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
+}
+
 unsigned int cpufreq_generic_get(unsigned int cpu)
 {
-    struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+    struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

     if (!policy || IS_ERR(policy->clk)) {
         pr_err("%s: No %s associated to cpu: %d\n",
                __func__, policy ? "clk" : "policy", cpu);

@@ -201,18 +257,29 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_get);

-/* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
-{
-    return per_cpu(cpufreq_cpu_data, cpu);
-}
-
+/**
+ * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
+ *
+ * @cpu: cpu to find policy for.
+ *
+ * This returns policy for 'cpu', returns NULL if it doesn't exist.
+ * It also increments the kobject reference count to mark it busy and so would
+ * require a corresponding call to cpufreq_cpu_put() to decrement it back.
+ * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
+ * freed as that depends on the kobj count.
+ *
+ * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
+ * valid policy is found. This is done to make sure the driver doesn't get
+ * unregistered while the policy is being used.
+ *
+ * Return: A valid policy on success, otherwise NULL on failure.
+ */
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
     struct cpufreq_policy *policy = NULL;
     unsigned long flags;

-    if (cpu >= nr_cpu_ids)
+    if (WARN_ON(cpu >= nr_cpu_ids))
         return NULL;

     if (!down_read_trylock(&cpufreq_rwsem))

@@ -223,7 +290,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
     if (cpufreq_driver) {
         /* get the CPU */
-        policy = per_cpu(cpufreq_cpu_data, cpu);
+        policy = cpufreq_cpu_get_raw(cpu);
         if (policy)
             kobject_get(&policy->kobj);
     }

@@ -237,6 +304,16 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

+/**
+ * cpufreq_cpu_put: Decrements the usage count of a policy
+ *
+ * @policy: policy earlier returned by cpufreq_cpu_get().
+ *
+ * This decrements the kobject reference count incremented earlier by calling
+ * cpufreq_cpu_get().
+ *
+ * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
+ */
 void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
     kobject_put(&policy->kobj);
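The pair of kerneldoc comments above encode a strict protocol: every successful cpufreq_cpu_get() pins both the policy kobject and cpufreq_rwsem until the matching cpufreq_cpu_put(). An illustrative caller (read_cur_freq() is a hypothetical name, not from this patch):

    static unsigned int read_cur_freq(unsigned int cpu)
    {
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int freq = 0;

        if (policy) {
            freq = policy->cur;
            /* Balance the kobject ref and the rwsem read-lock. */
            cpufreq_cpu_put(policy);
        }

        return freq;
    }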
@@ -798,11 +875,18 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,

     down_write(&policy->rwsem);

+    /* Updating inactive policies is invalid, so avoid doing that. */
+    if (unlikely(policy_is_inactive(policy))) {
+        ret = -EBUSY;
+        goto unlock_policy_rwsem;
+    }
+
     if (fattr->store)
         ret = fattr->store(policy, buf, count);
     else
         ret = -EIO;

+unlock_policy_rwsem:
     up_write(&policy->rwsem);

     up_read(&cpufreq_rwsem);

@@ -873,28 +957,67 @@ void cpufreq_sysfs_remove_file(const struct attribute *attr)
 }
 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

-/* symlink affected CPUs */
+static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+    struct device *cpu_dev;
+
+    pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
+
+    if (!policy)
+        return 0;
+
+    cpu_dev = get_cpu_device(cpu);
+    if (WARN_ON(!cpu_dev))
+        return 0;
+
+    return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
+}
+
+static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+    struct device *cpu_dev;
+
+    pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
+
+    cpu_dev = get_cpu_device(cpu);
+    if (WARN_ON(!cpu_dev))
+        return;
+
+    sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+}
+
+/* Add/remove symlinks for all related CPUs */
 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 {
     unsigned int j;
     int ret = 0;

-    for_each_cpu(j, policy->cpus) {
-        struct device *cpu_dev;
-
-        if (j == policy->cpu)
+    /* Some related CPUs might not be present (physically hotplugged) */
+    for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+        if (j == policy->kobj_cpu)
             continue;

-        pr_debug("Adding link for CPU: %u\n", j);
-        cpu_dev = get_cpu_device(j);
-        ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
-                    "cpufreq");
+        ret = add_cpu_dev_symlink(policy, j);
         if (ret)
             break;
     }
+
     return ret;
 }

+static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
+{
+    unsigned int j;
+
+    /* Some related CPUs might not be present (physically hotplugged) */
+    for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+        if (j == policy->kobj_cpu)
+            continue;
+
+        remove_cpu_dev_symlink(policy, j);
+    }
+}
+
 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                      struct device *dev)
 {
@@ -937,7 +1060,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
     memcpy(&new_policy, policy, sizeof(*policy));

     /* Update governor of new_policy to the governor used before hotplug */
-    gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+    gov = find_governor(policy->last_governor);
     if (gov)
         pr_debug("Restoring governor %s for cpu %d\n",
             policy->governor->name, policy->cpu);

@@ -963,7 +1086,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                   unsigned int cpu, struct device *dev)
 {
     int ret = 0;
-    unsigned long flags;
+
+    /* Has this CPU been taken care of already? */
+    if (cpumask_test_cpu(cpu, policy->cpus))
+        return 0;

     if (has_target()) {
         ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);

@@ -974,13 +1100,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
     }

     down_write(&policy->rwsem);
-
-    write_lock_irqsave(&cpufreq_driver_lock, flags);
-
     cpumask_set_cpu(cpu, policy->cpus);
-    per_cpu(cpufreq_cpu_data, cpu) = policy;
-    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
     up_write(&policy->rwsem);

     if (has_target()) {

@@ -994,7 +1114,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
         }
     }

-    return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+    return 0;
 }

 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)

@@ -1003,20 +1123,25 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
     unsigned long flags;

     read_lock_irqsave(&cpufreq_driver_lock, flags);
-    policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
+    policy = per_cpu(cpufreq_cpu_data, cpu);
     read_unlock_irqrestore(&cpufreq_driver_lock, flags);

-    if (policy)
-        policy->governor = NULL;
+    if (likely(policy)) {
+        /* Policy should be inactive here */
+        WARN_ON(!policy_is_inactive(policy));
+
+        down_write(&policy->rwsem);
+        policy->cpu = cpu;
+        up_write(&policy->rwsem);
+    }

     return policy;
 }

-static struct cpufreq_policy *cpufreq_policy_alloc(void)
+static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
 {
     struct cpufreq_policy *policy;
+    int ret;

     policy = kzalloc(sizeof(*policy), GFP_KERNEL);
     if (!policy)

@@ -1028,6 +1153,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
     if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
         goto err_free_cpumask;

+    ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
+                   "cpufreq");
+    if (ret) {
+        pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+        goto err_free_rcpumask;
+    }
+
     INIT_LIST_HEAD(&policy->policy_list);
     init_rwsem(&policy->rwsem);
     spin_lock_init(&policy->transition_lock);

@@ -1035,8 +1167,15 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
     init_completion(&policy->kobj_unregister);
     INIT_WORK(&policy->update, handle_update);

+    policy->cpu = dev->id;
+
+    /* Set this once on allocation */
+    policy->kobj_cpu = dev->id;
+
     return policy;

+err_free_rcpumask:
+    free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
     free_cpumask_var(policy->cpus);
 err_free_policy:

@@ -1045,18 +1184,20 @@ err_free_policy:
     return NULL;
 }

-static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
 {
     struct kobject *kobj;
     struct completion *cmp;

-    blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                 CPUFREQ_REMOVE_POLICY, policy);
+    if (notify)
+        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+                         CPUFREQ_REMOVE_POLICY, policy);

-    down_read(&policy->rwsem);
+    down_write(&policy->rwsem);
+    cpufreq_remove_dev_symlink(policy);
     kobj = &policy->kobj;
     cmp = &policy->kobj_unregister;
-    up_read(&policy->rwsem);
+    up_write(&policy->rwsem);
     kobject_put(kobj);

     /*
@@ -1069,68 +1210,64 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
     pr_debug("wait complete\n");
 }

-static void cpufreq_policy_free(struct cpufreq_policy *policy)
+static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
 {
+    unsigned long flags;
+    int cpu;
+
+    /* Remove policy from list */
+    write_lock_irqsave(&cpufreq_driver_lock, flags);
+    list_del(&policy->policy_list);
+
+    for_each_cpu(cpu, policy->related_cpus)
+        per_cpu(cpufreq_cpu_data, cpu) = NULL;
+    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+    cpufreq_policy_put_kobj(policy, notify);
     free_cpumask_var(policy->related_cpus);
     free_cpumask_var(policy->cpus);
     kfree(policy);
 }

-static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
-                 struct device *cpu_dev)
-{
-    int ret;
-
-    if (WARN_ON(cpu == policy->cpu))
-        return 0;
-
-    /* Move kobject to the new policy->cpu */
-    ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
-    if (ret) {
-        pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
-        return ret;
-    }
-
-    down_write(&policy->rwsem);
-    policy->cpu = cpu;
-    up_write(&policy->rwsem);
-
-    return 0;
-}
-
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
     unsigned int j, cpu = dev->id;
     int ret = -ENOMEM;
     struct cpufreq_policy *policy;
     unsigned long flags;
-    bool recover_policy = cpufreq_suspended;
-
-    if (cpu_is_offline(cpu))
-        return 0;
+    bool recover_policy = !sif;

     pr_debug("adding CPU %u\n", cpu);

-    /* check whether a different CPU already registered this
-     * CPU because it is in the same boat. */
-    policy = cpufreq_cpu_get_raw(cpu);
-    if (unlikely(policy))
-        return 0;
+    /*
+     * Only possible if 'cpu' wasn't physically present earlier and we are
+     * here from subsys_interface add callback. A hotplug notifier will
+     * follow and we will handle it like logical CPU hotplug then. For now,
+     * just create the sysfs link.
+     */
+    if (cpu_is_offline(cpu))
+        return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);

     if (!down_read_trylock(&cpufreq_rwsem))
         return 0;

-    /* Check if this cpu was hot-unplugged earlier and has siblings */
-    read_lock_irqsave(&cpufreq_driver_lock, flags);
-    for_each_policy(policy) {
-        if (cpumask_test_cpu(cpu, policy->related_cpus)) {
-            read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-            ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-            up_read(&cpufreq_rwsem);
-            return ret;
-        }
+    /* Check if this CPU already has a policy to manage it */
+    policy = per_cpu(cpufreq_cpu_data, cpu);
+    if (policy && !policy_is_inactive(policy)) {
+        WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+        ret = cpufreq_add_policy_cpu(policy, cpu, dev);
+        up_read(&cpufreq_rwsem);
+        return ret;
     }
-    read_unlock_irqrestore(&cpufreq_driver_lock, flags);

     /*
      * Restore the saved policy when doing light-weight init and fall back
@@ -1139,22 +1276,11 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
     policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
     if (!policy) {
         recover_policy = false;
-        policy = cpufreq_policy_alloc();
+        policy = cpufreq_policy_alloc(dev);
         if (!policy)
             goto nomem_out;
     }

-    /*
-     * In the resume path, since we restore a saved policy, the assignment
-     * to policy->cpu is like an update of the existing policy, rather than
-     * the creation of a brand new one. So we need to perform this update
-     * by invoking update_policy_cpu().
-     */
-    if (recover_policy && cpu != policy->cpu)
-        WARN_ON(update_policy_cpu(policy, cpu, dev));
-    else
-        policy->cpu = cpu;
-
     cpumask_copy(policy->cpus, cpumask_of(cpu));

     /* call driver. From then on the cpufreq must be able
@@ -1181,21 +1307,12 @@
         policy->user_policy.min = policy->min;
         policy->user_policy.max = policy->max;

-        /* prepare interface data */
-        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
-                       &dev->kobj, "cpufreq");
-        if (ret) {
-            pr_err("%s: failed to init policy->kobj: %d\n",
-                   __func__, ret);
-            goto err_init_policy_kobj;
-        }
+        write_lock_irqsave(&cpufreq_driver_lock, flags);
+        for_each_cpu(j, policy->related_cpus)
+            per_cpu(cpufreq_cpu_data, j) = policy;
+        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
     }

-    write_lock_irqsave(&cpufreq_driver_lock, flags);
-    for_each_cpu(j, policy->cpus)
-        per_cpu(cpufreq_cpu_data, j) = policy;
-    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
     if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
         policy->cur = cpufreq_driver->get(policy->cpu);
         if (!policy->cur) {
@@ -1253,11 +1370,11 @@
             goto err_out_unregister;
         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                 CPUFREQ_CREATE_POLICY, policy);
-    }

         write_lock_irqsave(&cpufreq_driver_lock, flags);
         list_add(&policy->policy_list, &cpufreq_policy_list);
         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+    }

     cpufreq_init_policy(policy);
@@ -1281,68 +1398,28 @@

 err_out_unregister:
 err_get_freq:
-    write_lock_irqsave(&cpufreq_driver_lock, flags);
-    for_each_cpu(j, policy->cpus)
-        per_cpu(cpufreq_cpu_data, j) = NULL;
-    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-    if (!recover_policy) {
-        kobject_put(&policy->kobj);
-        wait_for_completion(&policy->kobj_unregister);
-    }
-err_init_policy_kobj:
     up_write(&policy->rwsem);

     if (cpufreq_driver->exit)
         cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-    if (recover_policy) {
-        /* Do not leave stale fallback data behind. */
-        per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
-        cpufreq_policy_put_kobj(policy);
-    }
-    cpufreq_policy_free(policy);
-
+    cpufreq_policy_free(policy, recover_policy);
 nomem_out:
     up_read(&cpufreq_rwsem);

     return ret;
 }

-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
-{
-    return __cpufreq_add_dev(dev, sif);
-}
-
 static int __cpufreq_remove_dev_prepare(struct device *dev,
                     struct subsys_interface *sif)
 {
-    unsigned int cpu = dev->id, cpus;
-    int ret;
-    unsigned long flags;
+    unsigned int cpu = dev->id;
+    int ret = 0;
     struct cpufreq_policy *policy;

     pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

-    write_lock_irqsave(&cpufreq_driver_lock, flags);
-    policy = per_cpu(cpufreq_cpu_data, cpu);
-
-    /* Save the policy somewhere when doing a light-weight tear-down */
-    if (cpufreq_suspended)
-        per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
-
-    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+    policy = cpufreq_cpu_get_raw(cpu);

     if (!policy) {
         pr_debug("%s: No cpu_data found\n", __func__);
         return -EINVAL;
@@ -1354,108 +1431,75 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
             pr_err("%s: Failed to stop governor\n", __func__);
             return ret;
         }
-
-        strncpy(per_cpu(cpufreq_cpu_governor, cpu),
-            policy->governor->name, CPUFREQ_NAME_LEN);
     }

-    down_read(&policy->rwsem);
-    cpus = cpumask_weight(policy->cpus);
-    up_read(&policy->rwsem);
+    down_write(&policy->rwsem);
+    cpumask_clear_cpu(cpu, policy->cpus);

-    if (cpu != policy->cpu) {
-        sysfs_remove_link(&dev->kobj, "cpufreq");
-    } else if (cpus > 1) {
+    if (policy_is_inactive(policy)) {
+        if (has_target())
+            strncpy(policy->last_governor, policy->governor->name,
+                CPUFREQ_NAME_LEN);
+    } else if (cpu == policy->cpu) {
         /* Nominate new CPU */
-        int new_cpu = cpumask_any_but(policy->cpus, cpu);
-        struct device *cpu_dev = get_cpu_device(new_cpu);
+        policy->cpu = cpumask_any(policy->cpus);
+    }
+    up_write(&policy->rwsem);

-        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
-        ret = update_policy_cpu(policy, new_cpu, cpu_dev);
-        if (ret) {
-            if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
-                          "cpufreq"))
-                pr_err("%s: Failed to restore kobj link to cpu:%d\n",
-                       __func__, cpu_dev->id);
-            return ret;
+    /* Start governor again for active policy */
+    if (!policy_is_inactive(policy)) {
+        if (has_target()) {
+            ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+            if (!ret)
+                ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+            if (ret)
+                pr_err("%s: Failed to start governor\n", __func__);
         }
-
-        if (!cpufreq_suspended)
-            pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-                 __func__, new_cpu, cpu);
     } else if (cpufreq_driver->stop_cpu) {
         cpufreq_driver->stop_cpu(policy);
     }

-    return 0;
+    return ret;
 }

 static int __cpufreq_remove_dev_finish(struct device *dev,
                        struct subsys_interface *sif)
 {
-    unsigned int cpu = dev->id, cpus;
+    unsigned int cpu = dev->id;
     int ret;
-    unsigned long flags;
-    struct cpufreq_policy *policy;
-
-    write_lock_irqsave(&cpufreq_driver_lock, flags);
-    policy = per_cpu(cpufreq_cpu_data, cpu);
-    per_cpu(cpufreq_cpu_data, cpu) = NULL;
-    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+    struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

     if (!policy) {
         pr_debug("%s: No cpu_data found\n", __func__);
         return -EINVAL;
     }

-    down_write(&policy->rwsem);
-    cpus = cpumask_weight(policy->cpus);
-
-    if (cpus > 1)
-        cpumask_clear_cpu(cpu, policy->cpus);
-    up_write(&policy->rwsem);
+    /* Only proceed for inactive policies */
+    if (!policy_is_inactive(policy))
+        return 0;

     /* If cpu is last user of policy, free policy */
-    if (cpus == 1) {
-        if (has_target()) {
-            ret = __cpufreq_governor(policy,
-                    CPUFREQ_GOV_POLICY_EXIT);
-            if (ret) {
-                pr_err("%s: Failed to exit governor\n",
-                       __func__);
-                return ret;
-            }
-        }
-
-        if (!cpufreq_suspended)
-            cpufreq_policy_put_kobj(policy);
-
-        /*
-         * Perform the ->exit() even during light-weight tear-down,
-         * since this is a core component, and is essential for the
-         * subsequent light-weight ->init() to succeed.
-         */
-        if (cpufreq_driver->exit)
-            cpufreq_driver->exit(policy);
-
-        /* Remove policy from list of active policies */
-        write_lock_irqsave(&cpufreq_driver_lock, flags);
-        list_del(&policy->policy_list);
-        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-        if (!cpufreq_suspended)
-            cpufreq_policy_free(policy);
-    } else if (has_target()) {
-        ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
-        if (!ret)
-            ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
-        if (ret) {
-            pr_err("%s: Failed to start governor\n", __func__);
-            return ret;
-        }
+    if (has_target()) {
+        ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+        if (ret) {
+            pr_err("%s: Failed to exit governor\n", __func__);
+            return ret;
+        }
     }

+    /*
+     * Perform the ->exit() even during light-weight tear-down,
+     * since this is a core component, and is essential for the
+     * subsequent light-weight ->init() to succeed.
+     */
+    if (cpufreq_driver->exit)
+        cpufreq_driver->exit(policy);
+
+    /* Free the policy only if the driver is getting removed. */
+    if (sif)
+        cpufreq_policy_free(policy, true);
+
     return 0;
 }
@@ -1469,8 +1513,33 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
     unsigned int cpu = dev->id;
     int ret;

-    if (cpu_is_offline(cpu))
+    /*
+     * Only possible if 'cpu' is getting physically removed now. A hotplug
+     * notifier should have already been called and we just need to remove
+     * link or free policy here.
+     */
+    if (cpu_is_offline(cpu)) {
+        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+        struct cpumask mask;
+
+        if (!policy)
+            return 0;
+
+        cpumask_copy(&mask, policy->related_cpus);
+        cpumask_clear_cpu(cpu, &mask);
+
+        /*
+         * Free policy only if all policy->related_cpus are removed
+         * physically.
+         */
+        if (cpumask_intersects(&mask, cpu_present_mask)) {
+            remove_cpu_dev_symlink(policy, cpu);
+            return 0;
+        }
+
+        cpufreq_policy_free(policy, true);
         return 0;
+    }

     ret = __cpufreq_remove_dev_prepare(dev, sif);
@@ -1567,6 +1636,10 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)

     ret_freq = cpufreq_driver->get(policy->cpu);

+    /* Updating inactive policies is invalid, so avoid doing that. */
+    if (unlikely(policy_is_inactive(policy)))
+        return ret_freq;
+
     if (ret_freq && policy->cur &&
         !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
         /* verify no discrepancy between actual and
@@ -1656,7 +1729,7 @@ void cpufreq_suspend(void)

     pr_debug("%s: Suspending Governors\n", __func__);

-    for_each_policy(policy) {
+    for_each_active_policy(policy) {
         if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
             pr_err("%s: Failed to stop governor for policy: %p\n",
                 __func__, policy);

@@ -1690,7 +1763,7 @@ void cpufreq_resume(void)

     pr_debug("%s: Resuming Governors\n", __func__);

-    for_each_policy(policy) {
+    for_each_active_policy(policy) {
         if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
             pr_err("%s: Failed to resume driver: %p\n", __func__,
                 policy);

@@ -1891,7 +1964,7 @@ static int __target_index(struct cpufreq_policy *policy,
          * Failed after setting to intermediate freq? Driver should have
          * reverted back to initial frequency and so should we. Check
          * here for intermediate_freq instead of get_intermediate, in
-         * case we have't switched to intermediate freq at all.
+         * case we haven't switched to intermediate freq at all.
          */
         if (unlikely(retval && intermediate_freq)) {
             freqs.old = intermediate_freq;
@@ -2092,7 +2165,8 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);

 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
-    int cpu;
+    struct cpufreq_policy *policy;
+    unsigned long flags;

     if (!governor)
         return;

@@ -2100,12 +2174,15 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
     if (cpufreq_disabled())
         return;

-    for_each_present_cpu(cpu) {
-        if (cpu_online(cpu))
-            continue;
-        if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
-            strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+    /* clear last_governor for all inactive policies */
+    read_lock_irqsave(&cpufreq_driver_lock, flags);
+    for_each_inactive_policy(policy) {
+        if (!strcmp(policy->last_governor, governor->name)) {
+            policy->governor = NULL;
+            strcpy(policy->last_governor, "\0");
+        }
     }
+    read_unlock_irqrestore(&cpufreq_driver_lock, flags);

     mutex_lock(&cpufreq_governor_mutex);
     list_del(&governor->governor_list);
@@ -2304,7 +2381,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
     if (dev) {
         switch (action & ~CPU_TASKS_FROZEN) {
         case CPU_ONLINE:
-            __cpufreq_add_dev(dev, NULL);
+            cpufreq_add_dev(dev, NULL);
             break;

         case CPU_DOWN_PREPARE:

@@ -2316,7 +2393,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
             break;

         case CPU_DOWN_FAILED:
-            __cpufreq_add_dev(dev, NULL);
+            cpufreq_add_dev(dev, NULL);
             break;
         }
     }

@@ -2336,7 +2413,7 @@ static int cpufreq_boost_set_sw(int state)
     struct cpufreq_policy *policy;
     int ret = -EINVAL;

-    for_each_policy(policy) {
+    for_each_active_policy(policy) {
         freq_table = cpufreq_frequency_get_table(policy->cpu);
         if (freq_table) {
             ret = cpufreq_frequency_table_cpuinfo(policy,

View File

@@ -148,6 +148,10 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
     return 0;
 }

+static struct notifier_block cs_cpufreq_notifier_block = {
+    .notifier_call = dbs_cpufreq_notifier,
+};
+
 /************************** sysfs interface ************************/
 static struct common_dbs_data cs_dbs_cdata;

@@ -317,7 +321,7 @@ static struct attribute_group cs_attr_group_gov_pol = {

 /************************** sysfs end ************************/

-static int cs_init(struct dbs_data *dbs_data)
+static int cs_init(struct dbs_data *dbs_data, bool notify)
 {
     struct cs_dbs_tuners *tuners;

@@ -336,25 +340,25 @@ static int cs_init(struct dbs_data *dbs_data)
     dbs_data->tuners = tuners;
     dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
         jiffies_to_usecs(10);
-    mutex_init(&dbs_data->mutex);
+
+    if (notify)
+        cpufreq_register_notifier(&cs_cpufreq_notifier_block,
+                      CPUFREQ_TRANSITION_NOTIFIER);
+
     return 0;
 }

-static void cs_exit(struct dbs_data *dbs_data)
+static void cs_exit(struct dbs_data *dbs_data, bool notify)
 {
+    if (notify)
+        cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
+                        CPUFREQ_TRANSITION_NOTIFIER);
+
     kfree(dbs_data->tuners);
 }

 define_get_cpu_dbs_routines(cs_cpu_dbs_info);

-static struct notifier_block cs_cpufreq_notifier_block = {
-    .notifier_call = dbs_cpufreq_notifier,
-};
-
-static struct cs_ops cs_ops = {
-    .notifier_block = &cs_cpufreq_notifier_block,
-};
-
 static struct common_dbs_data cs_dbs_cdata = {
     .governor = GOV_CONSERVATIVE,
     .attr_group_gov_sys = &cs_attr_group_gov_sys,

@@ -363,9 +367,9 @@ static struct common_dbs_data cs_dbs_cdata = {
     .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
     .gov_dbs_timer = cs_dbs_timer,
     .gov_check_cpu = cs_check_cpu,
-    .gov_ops = &cs_ops,
     .init = cs_init,
     .exit = cs_exit,
+    .mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
 };

 static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,

View File

@@ -239,211 +239,242 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
     }
 }

+static int cpufreq_governor_init(struct cpufreq_policy *policy,
+                 struct dbs_data *dbs_data,
+                 struct common_dbs_data *cdata)
+{
+    unsigned int latency;
+    int ret;
+
+    if (dbs_data) {
+        if (WARN_ON(have_governor_per_policy()))
+            return -EINVAL;
+        dbs_data->usage_count++;
+        policy->governor_data = dbs_data;
+        return 0;
+    }
+
+    dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
+    if (!dbs_data)
+        return -ENOMEM;
+
+    dbs_data->cdata = cdata;
+    dbs_data->usage_count = 1;
+
+    ret = cdata->init(dbs_data, !policy->governor->initialized);
+    if (ret)
+        goto free_dbs_data;
+
+    /* policy latency is in ns. Convert it to us first */
+    latency = policy->cpuinfo.transition_latency / 1000;
+    if (latency == 0)
+        latency = 1;
+
+    /* Bring kernel and HW constraints together */
+    dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+                      MIN_LATENCY_MULTIPLIER * latency);
+    set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
+                    latency * LATENCY_MULTIPLIER));
+
+    if (!have_governor_per_policy()) {
+        if (WARN_ON(cpufreq_get_global_kobject())) {
+            ret = -EINVAL;
+            goto cdata_exit;
+        }
+        cdata->gdbs_data = dbs_data;
+    }
+
+    ret = sysfs_create_group(get_governor_parent_kobj(policy),
+                 get_sysfs_attr(dbs_data));
+    if (ret)
+        goto put_kobj;
+
+    policy->governor_data = dbs_data;
+
+    return 0;
+
+put_kobj:
+    if (!have_governor_per_policy()) {
+        cdata->gdbs_data = NULL;
+        cpufreq_put_global_kobject();
+    }
+cdata_exit:
+    cdata->exit(dbs_data, !policy->governor->initialized);
+free_dbs_data:
+    kfree(dbs_data);
+    return ret;
+}
+
+static void cpufreq_governor_exit(struct cpufreq_policy *policy,
+                  struct dbs_data *dbs_data)
+{
+    struct common_dbs_data *cdata = dbs_data->cdata;
+
+    policy->governor_data = NULL;
+    if (!--dbs_data->usage_count) {
+        sysfs_remove_group(get_governor_parent_kobj(policy),
+                   get_sysfs_attr(dbs_data));
+
+        if (!have_governor_per_policy()) {
+            cdata->gdbs_data = NULL;
+            cpufreq_put_global_kobject();
+        }
+
+        cdata->exit(dbs_data, policy->governor->initialized == 1);
+        kfree(dbs_data);
+    }
+}
+
+static int cpufreq_governor_start(struct cpufreq_policy *policy,
+                  struct dbs_data *dbs_data)
+{
+    struct common_dbs_data *cdata = dbs_data->cdata;
+    unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
+    struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+    int io_busy = 0;
+
+    if (!policy->cur)
+        return -EINVAL;
+
+    if (cdata->governor == GOV_CONSERVATIVE) {
+        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+
+        sampling_rate = cs_tuners->sampling_rate;
+        ignore_nice = cs_tuners->ignore_nice_load;
+    } else {
+        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+        sampling_rate = od_tuners->sampling_rate;
+        ignore_nice = od_tuners->ignore_nice_load;
+        io_busy = od_tuners->io_is_busy;
+    }
+
+    for_each_cpu(j, policy->cpus) {
+        struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j);
+        unsigned int prev_load;
+
+        j_cdbs->cpu = j;
+        j_cdbs->cur_policy = policy;
+        j_cdbs->prev_cpu_idle =
+            get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
+
+        prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
+                       j_cdbs->prev_cpu_idle);
+        j_cdbs->prev_load = 100 * prev_load /
+                    (unsigned int)j_cdbs->prev_cpu_wall;
+
+        if (ignore_nice)
+            j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+
+        mutex_init(&j_cdbs->timer_mutex);
+        INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
+    }
+
+    if (cdata->governor == GOV_CONSERVATIVE) {
+        struct cs_cpu_dbs_info_s *cs_dbs_info =
+            cdata->get_cpu_dbs_info_s(cpu);
+
+        cs_dbs_info->down_skip = 0;
+        cs_dbs_info->enable = 1;
+        cs_dbs_info->requested_freq = policy->cur;
+    } else {
+        struct od_ops *od_ops = cdata->gov_ops;
+        struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);
+
+        od_dbs_info->rate_mult = 1;
+        od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+        od_ops->powersave_bias_init_cpu(cpu);
+    }
+
+    /* Initiate timer time stamp */
+    cpu_cdbs->time_stamp = ktime_get();
+
+    gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
+               true);
+    return 0;
+}
+
+static void cpufreq_governor_stop(struct cpufreq_policy *policy,
+                  struct dbs_data *dbs_data)
+{
+    struct common_dbs_data *cdata = dbs_data->cdata;
+    unsigned int cpu = policy->cpu;
+    struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+
+    if (cdata->governor == GOV_CONSERVATIVE) {
+        struct cs_cpu_dbs_info_s *cs_dbs_info =
+            cdata->get_cpu_dbs_info_s(cpu);
+
+        cs_dbs_info->enable = 0;
+    }
+
+    gov_cancel_work(dbs_data, policy);
+
+    mutex_destroy(&cpu_cdbs->timer_mutex);
+    cpu_cdbs->cur_policy = NULL;
+}
+
+static void cpufreq_governor_limits(struct cpufreq_policy *policy,
+                    struct dbs_data *dbs_data)
+{
+    struct common_dbs_data *cdata = dbs_data->cdata;
+    unsigned int cpu = policy->cpu;
+    struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+
+    if (!cpu_cdbs->cur_policy)
+        return;
+
+    mutex_lock(&cpu_cdbs->timer_mutex);
+    if (policy->max < cpu_cdbs->cur_policy->cur)
+        __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max,
+                    CPUFREQ_RELATION_H);
+    else if (policy->min > cpu_cdbs->cur_policy->cur)
+        __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min,
+                    CPUFREQ_RELATION_L);
+    dbs_check_cpu(dbs_data, cpu);
+    mutex_unlock(&cpu_cdbs->timer_mutex);
+}
+
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         struct common_dbs_data *cdata, unsigned int event)
 {
     struct dbs_data *dbs_data;
-    struct od_cpu_dbs_info_s *od_dbs_info = NULL;
-    struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
-    struct od_ops *od_ops = NULL;
-    struct od_dbs_tuners *od_tuners = NULL;
-    struct cs_dbs_tuners *cs_tuners = NULL;
-    struct cpu_dbs_common_info *cpu_cdbs;
-    unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
-    int io_busy = 0;
-    int rc;
+    int ret = 0;
+
+    /* Lock governor to block concurrent initialization of governor */
+    mutex_lock(&cdata->mutex);

     if (have_governor_per_policy())
         dbs_data = policy->governor_data;
     else
         dbs_data = cdata->gdbs_data;

-    WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
+    if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
+        ret = -EINVAL;
+        goto unlock;
+    }

     switch (event) {
     case CPUFREQ_GOV_POLICY_INIT:
-        if (have_governor_per_policy()) {
-            WARN_ON(dbs_data);
-        } else if (dbs_data) {
-            dbs_data->usage_count++;
-            policy->governor_data = dbs_data;
-            return 0;
-        }
-
-        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
-        if (!dbs_data) {
-            pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
-            return -ENOMEM;
-        }
-
-        dbs_data->cdata = cdata;
-        dbs_data->usage_count = 1;
-        rc = cdata->init(dbs_data);
-        if (rc) {
-            pr_err("%s: POLICY_INIT: init() failed\n", __func__);
-            kfree(dbs_data);
-            return rc;
-        }
-
-        if (!have_governor_per_policy())
-            WARN_ON(cpufreq_get_global_kobject());
-
-        rc = sysfs_create_group(get_governor_parent_kobj(policy),
-                get_sysfs_attr(dbs_data));
-        if (rc) {
-            cdata->exit(dbs_data);
-            kfree(dbs_data);
-            return rc;
-        }
-
-        policy->governor_data = dbs_data;
-
-        /* policy latency is in ns. Convert it to us first */
-        latency = policy->cpuinfo.transition_latency / 1000;
-        if (latency == 0)
-            latency = 1;
-
-        /* Bring kernel and HW constraints together */
-        dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
-                MIN_LATENCY_MULTIPLIER * latency);
-        set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
-                    latency * LATENCY_MULTIPLIER));
-
-        if ((cdata->governor == GOV_CONSERVATIVE) &&
-                (!policy->governor->initialized)) {
-            struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
-
-            cpufreq_register_notifier(cs_ops->notifier_block,
-                    CPUFREQ_TRANSITION_NOTIFIER);
-        }
-
-        if (!have_governor_per_policy())
-            cdata->gdbs_data = dbs_data;
-
-        return 0;
+        ret = cpufreq_governor_init(policy, dbs_data, cdata);
+        break;
     case CPUFREQ_GOV_POLICY_EXIT:
-        if (!--dbs_data->usage_count) {
-            sysfs_remove_group(get_governor_parent_kobj(policy),
-                    get_sysfs_attr(dbs_data));
-
-            if (!have_governor_per_policy())
-                cpufreq_put_global_kobject();
-
-            if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
-                (policy->governor->initialized == 1)) {
-                struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
-
-                cpufreq_unregister_notifier(cs_ops->notifier_block,
-                        CPUFREQ_TRANSITION_NOTIFIER);
-            }
-
-            cdata->exit(dbs_data);
-            kfree(dbs_data);
-            cdata->gdbs_data = NULL;
-        }
-
-        policy->governor_data = NULL;
-        return 0;
-    }
-
-    cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
-
-    if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
-        cs_tuners = dbs_data->tuners;
-        cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
-        sampling_rate = cs_tuners->sampling_rate;
-        ignore_nice = cs_tuners->ignore_nice_load;
-    } else {
-        od_tuners = dbs_data->tuners;
-        od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
-        sampling_rate = od_tuners->sampling_rate;
-        ignore_nice = od_tuners->ignore_nice_load;
-        od_ops = dbs_data->cdata->gov_ops;
-        io_busy = od_tuners->io_is_busy;
-    }
-
-    switch (event) {
+        cpufreq_governor_exit(policy, dbs_data);
+        break;
     case CPUFREQ_GOV_START:
-        if (!policy->cur)
-            return -EINVAL;
-
-        mutex_lock(&dbs_data->mutex);
-
-        for_each_cpu(j, policy->cpus) {
-            struct cpu_dbs_common_info *j_cdbs =
-                dbs_data->cdata->get_cpu_cdbs(j);
-            unsigned int prev_load;
-
-            j_cdbs->cpu = j;
-            j_cdbs->cur_policy = policy;
-            j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
-                &j_cdbs->prev_cpu_wall, io_busy);
-
-            prev_load = (unsigned int)
-                (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
-            j_cdbs->prev_load = 100 * prev_load /
-                (unsigned int) j_cdbs->prev_cpu_wall;
-
-            if (ignore_nice)
-                j_cdbs->prev_cpu_nice =
-                    kcpustat_cpu(j).cpustat[CPUTIME_NICE];
-
-            mutex_init(&j_cdbs->timer_mutex);
-            INIT_DEFERRABLE_WORK(&j_cdbs->work,
-                         dbs_data->cdata->gov_dbs_timer);
-        }
-
-        if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
-            cs_dbs_info->down_skip = 0;
-            cs_dbs_info->enable = 1;
-            cs_dbs_info->requested_freq = policy->cur;
-        } else {
-            od_dbs_info->rate_mult = 1;
-            od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
-            od_ops->powersave_bias_init_cpu(cpu);
-        }
-
-        mutex_unlock(&dbs_data->mutex);
-
-        /* Initiate timer time stamp */
-        cpu_cdbs->time_stamp = ktime_get();
-
-        gov_queue_work(dbs_data, policy,
-                delay_for_sampling_rate(sampling_rate), true);
+        ret = cpufreq_governor_start(policy, dbs_data);
         break;
-
     case CPUFREQ_GOV_STOP:
-        if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
-            cs_dbs_info->enable = 0;
-
-        gov_cancel_work(dbs_data, policy);
-
-        mutex_lock(&dbs_data->mutex);
-        mutex_destroy(&cpu_cdbs->timer_mutex);
-        cpu_cdbs->cur_policy = NULL;
-
-        mutex_unlock(&dbs_data->mutex);
-
+        cpufreq_governor_stop(policy, dbs_data);
         break;
-
     case CPUFREQ_GOV_LIMITS:
-        mutex_lock(&dbs_data->mutex);
-        if (!cpu_cdbs->cur_policy) {
-            mutex_unlock(&dbs_data->mutex);
-            break;
-        }
-        mutex_lock(&cpu_cdbs->timer_mutex);
-        if (policy->max < cpu_cdbs->cur_policy->cur)
-            __cpufreq_driver_target(cpu_cdbs->cur_policy,
-                    policy->max, CPUFREQ_RELATION_H);
-        else if (policy->min > cpu_cdbs->cur_policy->cur)
-            __cpufreq_driver_target(cpu_cdbs->cur_policy,
-                    policy->min, CPUFREQ_RELATION_L);
-        dbs_check_cpu(dbs_data, cpu);
-        mutex_unlock(&cpu_cdbs->timer_mutex);
-        mutex_unlock(&dbs_data->mutex);
-
+        cpufreq_governor_limits(policy, dbs_data);
         break;
     }

-    return 0;
+unlock:
+    mutex_unlock(&cdata->mutex);
+
+    return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
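The net effect of the split — one entry point that takes cdata->mutex, validates state, and dispatches to a per-event helper — is what "cpufreq: governor: Serialize governor callbacks" relies on. A toy userspace rendering of the same pattern (hypothetical names, pthread instead of kernel mutexes):

#include <pthread.h>

enum gov_event { GOV_INIT, GOV_EXIT, GOV_START, GOV_STOP };

static pthread_mutex_t gov_lock = PTHREAD_MUTEX_INITIALIZER;

/* Holding one lock across the whole dispatch means INIT/EXIT/START/STOP
 * can never interleave on the same governor state. */
static int governor_dispatch(enum gov_event event)
{
    int ret = 0;

    pthread_mutex_lock(&gov_lock);
    switch (event) {
    case GOV_INIT:  /* allocate tuners */ break;
    case GOV_EXIT:  /* free tuners */ break;
    case GOV_START: /* arm sampling timers */ break;
    case GOV_STOP:  /* cancel sampling timers */ break;
    }
    pthread_mutex_unlock(&gov_lock);

    return ret;
}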

View File

@@ -208,11 +208,16 @@ struct common_dbs_data {
     void *(*get_cpu_dbs_info_s)(int cpu);
     void (*gov_dbs_timer)(struct work_struct *work);
     void (*gov_check_cpu)(int cpu, unsigned int load);
-    int (*init)(struct dbs_data *dbs_data);
-    void (*exit)(struct dbs_data *dbs_data);
+    int (*init)(struct dbs_data *dbs_data, bool notify);
+    void (*exit)(struct dbs_data *dbs_data, bool notify);

     /* Governor specific ops, see below */
     void *gov_ops;
+
+    /*
+     * Protects governor's data (struct dbs_data and struct common_dbs_data)
+     */
+    struct mutex mutex;
 };

 /* Governor Per policy data */

@@ -221,9 +226,6 @@ struct dbs_data {
     unsigned int min_sampling_rate;
     int usage_count;
     void *tuners;
-
-    /* dbs_mutex protects dbs_enable in governor start/stop */
-    struct mutex mutex;
 };

 /* Governor specific ops, will be passed to dbs_data->gov_ops */

@@ -234,10 +236,6 @@ struct od_ops {
     void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
 };

-struct cs_ops {
-    struct notifier_block *notifier_block;
-};
-
 static inline int delay_for_sampling_rate(unsigned int sampling_rate)
 {
     int delay = usecs_to_jiffies(sampling_rate);

View File

@@ -475,7 +475,7 @@ static struct attribute_group od_attr_group_gov_pol = {

 /************************** sysfs end ************************/

-static int od_init(struct dbs_data *dbs_data)
+static int od_init(struct dbs_data *dbs_data, bool notify)
 {
     struct od_dbs_tuners *tuners;
     u64 idle_time;

@@ -513,11 +513,10 @@ static int od_init(struct dbs_data *dbs_data)
     tuners->io_is_busy = should_io_be_busy();

     dbs_data->tuners = tuners;
-    mutex_init(&dbs_data->mutex);
     return 0;
 }

-static void od_exit(struct dbs_data *dbs_data)
+static void od_exit(struct dbs_data *dbs_data, bool notify)
 {
     kfree(dbs_data->tuners);
 }

@@ -541,6 +540,7 @@ static struct common_dbs_data od_dbs_cdata = {
     .gov_ops = &od_ops,
     .init = od_init,
     .exit = od_exit,
+    .mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
 };

 static void od_set_powersave_bias(unsigned int powersave_bias)

diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c

@@ -144,7 +144,7 @@ module_param(max_duration, int, 0444);
 
 /**
- * we can detect a core multipiler from dir0_lsb
+ * we can detect a core multiplier from dir0_lsb
  * from GX1 datasheet p.56,
  *	MULT[3:0]:
  *	0000 = SYSCLK multiplied by 4 (test only)
@@ -346,7 +346,7 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
 	/* it needs to be assured that at least one supported frequency is
 	 * within policy->min and policy->max. If it is not, policy->max
-	 * needs to be increased until one freuqency is supported.
+	 * needs to be increased until one frequency is supported.
 	 * policy->min may not be decreased, though. This way we guarantee a
 	 * specific processing capacity.
 	 */

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c

@@ -48,9 +48,9 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
 }
 
-static inline int32_t div_fp(int32_t x, int32_t y)
+static inline int32_t div_fp(s64 x, s64 y)
 {
-	return div_s64((int64_t)x << FRAC_BITS, y);
+	return div64_s64((int64_t)x << FRAC_BITS, y);
 }
 
 static inline int ceiling_fp(int32_t x)
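This widening is the "Fix overflow in busy_scaled due to long delay" change from the merge log: with 32-bit parameters, the fixed-point values produced by int_tofp() were silently truncated at the call boundary once the sampled interval grew long enough. A standalone demo of the wrap, assuming this driver's FRAC_BITS of 8 (numbers picked so the truncation is visible on common two's-complement targets):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8
    #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)

    /* Old shape: 64-bit fixed-point arguments truncated to int32_t. */
    static int32_t div_fp_old(int32_t x, int32_t y)
    {
            return (int32_t)(((int64_t)x << FRAC_BITS) / y);
    }

    /* New shape, as in the hunk: 64-bit end to end. */
    static int32_t div_fp_new(int64_t x, int64_t y)
    {
            return (int32_t)((x << FRAC_BITS) / y);
    }

    int main(void)
    {
            int64_t sample_time = 10 * 1000;  /* 10 ms sample period, in us */
            int64_t duration_us = 16777220;   /* ~16.8 s gap, e.g. deep idle */

            /* int_tofp(16777220) = 4294968320, which wraps to 1024 as an
             * int32_t, so the old helper reports a sample ratio of ~2500x
             * instead of ~0.0006. */
            printf("old: %d\n", div_fp_old((int32_t)int_tofp(sample_time),
                                           (int32_t)int_tofp(duration_us)));
            printf("new: %d\n", div_fp_new(int_tofp(sample_time),
                                           int_tofp(duration_us)));
            return 0;
    }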
@@ -68,6 +68,7 @@ struct sample {
 	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
+	u64 tsc;
 	int freq;
 	ktime_t time;
 };
@@ -109,6 +110,7 @@ struct cpudata {
 	ktime_t last_sample_time;
 	u64	prev_aperf;
 	u64	prev_mperf;
+	u64	prev_tsc;
 	struct sample sample;
 };
@@ -396,7 +398,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 	update_turbo_state();
 	if (limits.turbo_disabled) {
-		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
 		return -EPERM;
 	}
@@ -484,7 +486,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
 static void intel_pstate_hwp_enable(void)
 {
 	hwp_active++;
-	pr_info("intel_pstate HWP enabled\n");
+	pr_info("intel_pstate: HWP enabled\n");
 
 	wrmsrl( MSR_PM_ENABLE, 0x1);
 }
@@ -535,7 +537,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 	val |= vid;
 
-	wrmsrl(MSR_IA32_PERF_CTL, val);
+	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
 #define BYT_BCLK_FREQS 5
@@ -704,19 +706,20 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
-static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
 {
 	int max_perf, min_perf;
 
-	update_turbo_state();
+	if (force) {
+		update_turbo_state();
 
-	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
 
-	pstate = clamp_t(int, pstate, min_perf, max_perf);
+		pstate = clamp_t(int, pstate, min_perf, max_perf);
 
-	if (pstate == cpu->pstate.current_pstate)
-		return;
+		if (pstate == cpu->pstate.current_pstate)
+			return;
+	}
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
 
 	cpu->pstate.current_pstate = pstate;
@@ -733,7 +736,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
 
-	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
 }
 
 static inline void intel_pstate_calc_busy(struct cpudata *cpu)
@@ -756,23 +759,28 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 {
 	u64 aperf, mperf;
 	unsigned long flags;
+	u64 tsc;
 
 	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
+	tsc = native_read_tsc();
 	local_irq_restore(flags);
 
 	cpu->last_sample_time = cpu->sample.time;
 	cpu->sample.time = ktime_get();
 	cpu->sample.aperf = aperf;
 	cpu->sample.mperf = mperf;
+	cpu->sample.tsc = tsc;
 	cpu->sample.aperf -= cpu->prev_aperf;
 	cpu->sample.mperf -= cpu->prev_mperf;
+	cpu->sample.tsc -= cpu->prev_tsc;
 
 	intel_pstate_calc_busy(cpu);
 
 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
+	cpu->prev_tsc = tsc;
 }
 
 static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
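The new tsc delta rides along with the APERF/MPERF pair the function already collected. As a rough sketch of what these counters mean (not the driver's exact fixed-point math, which lives in intel_pstate_calc_busy()): APERF counts cycles at the delivered frequency, MPERF counts at the guaranteed ratio, both halt in idle, and the TSC never halts — so the extra delta lets a trace consumer derive C0 residency:

    #include <stdint.h>

    struct deltas {
            uint64_t aperf;  /* cycles at actual frequency (halts in idle) */
            uint64_t mperf;  /* cycles at guaranteed ratio (halts in idle) */
            uint64_t tsc;    /* wall-clock cycles (never halts) */
    };

    /* Delivered performance relative to guaranteed, in percent. */
    static uint32_t perf_pct(const struct deltas *d)
    {
            return d->mperf ? (uint32_t)(100 * d->aperf / d->mperf) : 0;
    }

    /* Share of the interval spent awake (C0), in percent -- what
     * exporting the TSC delta to the tracepoint makes computable. */
    static uint32_t c0_pct(const struct deltas *d)
    {
            return d->tsc ? (uint32_t)(100 * d->mperf / d->tsc) : 0;
    }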
@@ -794,7 +802,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 
 static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
 	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
-	u32 duration_us;
+	s64 duration_us;
 	u32 sample_time;
 
 	/*
@@ -821,8 +829,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	 * to adjust our busyness.
 	 */
 	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
-	duration_us = (u32) ktime_us_delta(cpu->sample.time,
-					   cpu->last_sample_time);
+	duration_us = ktime_us_delta(cpu->sample.time,
+				     cpu->last_sample_time);
 	if (duration_us > sample_time * 3) {
 		sample_ratio = div_fp(int_tofp(sample_time),
 				      int_tofp(duration_us));
@@ -837,6 +845,10 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 	int32_t busy_scaled;
 	struct _pid *pid;
 	signed int ctl;
+	int from;
+	struct sample *sample;
+
+	from = cpu->pstate.current_pstate;
 
 	pid = &cpu->pid;
 	busy_scaled = intel_pstate_get_scaled_busy(cpu);
@@ -844,7 +856,17 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 	ctl = pid_calc(pid, busy_scaled);
 
 	/* Negative values of ctl increase the pstate and vice versa */
-	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
+	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);
+
+	sample = &cpu->sample;
+	trace_pstate_sample(fp_toint(sample->core_pct_busy),
+			    fp_toint(busy_scaled),
+			    from,
+			    cpu->pstate.current_pstate,
+			    sample->mperf,
+			    sample->aperf,
+			    sample->tsc,
+			    sample->freq);
 }
 
 static void intel_hwp_timer_func(unsigned long __data)
@@ -858,21 +880,11 @@ static void intel_hwp_timer_func(unsigned long __data)
 static void intel_pstate_timer_func(unsigned long __data)
 {
 	struct cpudata *cpu = (struct cpudata *) __data;
-	struct sample *sample;
 
 	intel_pstate_sample(cpu);
 
-	sample = &cpu->sample;
-
 	intel_pstate_adjust_busy_pstate(cpu);
 
-	trace_pstate_sample(fp_toint(sample->core_pct_busy),
-			fp_toint(intel_pstate_get_scaled_busy(cpu)),
-			cpu->pstate.current_pstate,
-			sample->mperf,
-			sample->aperf,
-			sample->freq);
-
 	intel_pstate_set_sample_time(cpu);
 }
@@ -935,7 +947,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	add_timer_on(&cpu->timer, cpunum);
 
-	pr_debug("Intel pstate controlling: cpu %d\n", cpunum);
+	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
 
 	return 0;
 }
@@ -1001,13 +1013,13 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 	int cpu_num = policy->cpu;
 	struct cpudata *cpu = all_cpu_data[cpu_num];
 
-	pr_info("intel_pstate CPU %d exiting\n", cpu_num);
+	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
 
 	del_timer_sync(&all_cpu_data[cpu_num]->timer);
 	if (hwp_active)
 		return;
 
-	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
 }
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)

diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c

@@ -56,7 +56,7 @@ module_param(pxa27x_maxfreq, uint, 0);
 MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz"
 		 "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
 
-typedef struct {
+struct pxa_freqs {
 	unsigned int khz;
 	unsigned int membus;
 	unsigned int cccr;
@@ -64,7 +64,7 @@ typedef struct {
 	unsigned int cclkcfg;
 	int vmin;
 	int vmax;
-} pxa_freqs_t;
+};
 
 /* Define the refresh period in mSec for the SDRAM and the number of rows */
 #define SDRAM_TREF	64	/* standard 64ms SDRAM */
@@ -86,7 +86,7 @@ static unsigned int sdram_rows;
 /* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
 #define CCLKCFG			CCLKCFG_TURBO | CCLKCFG_FCS
 
-static pxa_freqs_t pxa255_run_freqs[] =
+static const struct pxa_freqs pxa255_run_freqs[] =
 {
 	/* CPU   MEMBUS  CCCR  DIV2 CCLKCFG	   run  turbo PXbus SDRAM */
 	{ 99500,  99500, 0x121, 1,  CCLKCFG, -1, -1},	/*  99,  99,  50,  50  */
@@ -98,7 +98,7 @@ static pxa_freqs_t pxa255_run_freqs[] =
 };
 
 /* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
-static pxa_freqs_t pxa255_turbo_freqs[] =
+static const struct pxa_freqs pxa255_turbo_freqs[] =
 {
 	/* CPU   MEMBUS  CCCR  DIV2 CCLKCFG	   run  turbo PXbus SDRAM */
 	{ 99500, 99500, 0x121, 1,  CCLKCFG, -1, -1},	/*  99,  99,  50,  50  */
@@ -153,7 +153,7 @@ MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table
 	((HT) ? CCLKCFG_HALFTURBO : 0) | \
 	((T) ? CCLKCFG_TURBO : 0))
 
-static pxa_freqs_t pxa27x_freqs[] = {
+static struct pxa_freqs pxa27x_freqs[] = {
 	{104000, 104000, PXA27x_CCCR(1,	 8, 2), 0, CCLKCFG2(1, 0, 1),  900000, 1705000 },
 	{156000, 104000, PXA27x_CCCR(1,	 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
 	{208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
@@ -171,7 +171,7 @@ extern unsigned get_clk_frequency_khz(int info);
 
 #ifdef CONFIG_REGULATOR
 
-static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
 {
 	int ret = 0;
 	int vmin, vmax;
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
 	}
 }
 #else
-static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
+static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
 {
 	return 0;
 }
@@ -211,7 +211,7 @@ static void __init pxa_cpufreq_init_voltages(void) { }
 #endif
 
 static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
-			     pxa_freqs_t **pxa_freqs)
+			     const struct pxa_freqs **pxa_freqs)
 {
 	if (cpu_is_pxa25x()) {
 		if (!pxa255_turbo_table) {
@@ -270,7 +270,7 @@ static unsigned int pxa_cpufreq_get(unsigned int cpu)
 static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
 {
 	struct cpufreq_frequency_table *pxa_freqs_table;
-	pxa_freqs_t *pxa_freq_settings;
+	const struct pxa_freqs *pxa_freq_settings;
 	unsigned long flags;
 	unsigned int new_freq_cpu, new_freq_mem;
 	unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
@@ -361,7 +361,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 	int i;
 	unsigned int freq;
 	struct cpufreq_frequency_table *pxa255_freq_table;
-	pxa_freqs_t *pxa255_freqs;
+	const struct pxa_freqs *pxa255_freqs;
 
 	/* try to guess pxa27x cpu */
 	if (cpu_is_pxa27x())
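Two mechanical changes run through this file: the pxa_freqs_t typedef becomes a plain struct pxa_freqs (kernel style discourages typedefs for ordinary structs), and the fixed PXA255 tables plus their consumers gain const, letting the tables live in read-only data. A compile-only sketch of what the const half buys (fields trimmed, values invented):

    struct pxa_freqs {
            unsigned int khz;
            int vmin, vmax;
    };

    /* const tables land in .rodata and cannot be modified at run time... */
    static const struct pxa_freqs run_freqs[] = {
            {  99500, -1, -1 },
            { 199500, -1, -1 },
    };

    /* ...and const-correct consumers let the compiler enforce that: */
    static int change_voltage(const struct pxa_freqs *f)
    {
            return f->vmin;  /* read-only access */
    }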

diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c

@@ -27,11 +27,11 @@
 
 /**
  * struct cpu_data
- * @parent: the parent node of cpu clock
+ * @pclk: the parent clock of cpu
  * @table: frequency table
  */
 struct cpu_data {
-	struct device_node *parent;
+	struct clk **pclk;
 	struct cpufreq_frequency_table *table;
 };
@@ -196,7 +196,7 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
 static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	struct device_node *np;
+	struct device_node *np, *pnode;
 	int i, count, ret;
 	u32 freq, mask;
 	struct clk *clk;
@@ -219,17 +219,23 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		goto err_nomem2;
 	}
 
-	data->parent = of_parse_phandle(np, "clocks", 0);
-	if (!data->parent) {
+	pnode = of_parse_phandle(np, "clocks", 0);
+	if (!pnode) {
 		pr_err("%s: could not get clock information\n", __func__);
 		goto err_nomem2;
 	}
 
-	count = of_property_count_strings(data->parent, "clock-names");
+	count = of_property_count_strings(pnode, "clock-names");
+	data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+	if (!data->pclk) {
+		pr_err("%s: no memory\n", __func__);
+		goto err_node;
+	}
+
 	table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
 	if (!table) {
 		pr_err("%s: no memory\n", __func__);
-		goto err_node;
+		goto err_pclk;
 	}
 
 	if (fmask)
@@ -238,7 +244,8 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		mask = 0x0;
 
 	for (i = 0; i < count; i++) {
-		clk = of_clk_get(data->parent, i);
+		clk = of_clk_get(pnode, i);
+		data->pclk[i] = clk;
 		freq = clk_get_rate(clk);
 		/*
 		 * the clock is valid if its frequency is not masked
@@ -273,13 +280,16 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = u64temp + 1;
 
 	of_node_put(np);
+	of_node_put(pnode);
 
 	return 0;
 
 err_nomem1:
 	kfree(table);
+err_pclk:
+	kfree(data->pclk);
 err_node:
-	of_node_put(data->parent);
+	of_node_put(pnode);
 err_nomem2:
 	policy->driver_data = NULL;
 	kfree(data);
@@ -293,7 +303,7 @@ static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
 	struct cpu_data *data = policy->driver_data;
 
-	of_node_put(data->parent);
+	kfree(data->pclk);
 	kfree(data->table);
 	kfree(data);
 	policy->driver_data = NULL;
@@ -307,7 +317,7 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
 	struct clk *parent;
 	struct cpu_data *data = policy->driver_data;
 
-	parent = of_clk_get(data->parent, data->table[index].driver_data);
+	parent = data->pclk[data->table[index].driver_data];
 	return clk_set_parent(policy->clk, parent);
 }
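The optimization pattern in this file: the device-tree clock lookup (of_clk_get()) moves out of the frequency-transition path into init, where every parent clock is cached in the new data->pclk array, leaving qoriq_cpufreq_target() with a plain array index. A user-space model of the same cache-at-init idea (all names invented for the sketch):

    #include <stdlib.h>

    struct clk { int id; };                    /* stand-in clock handle */

    /* Stands in for of_clk_get(): expensive, so call it only at init. */
    static struct clk *slow_lookup(int i)
    {
            struct clk *c = malloc(sizeof(*c));
            if (c)
                    c->id = i;
            return c;
    }

    struct cpu_data {
            struct clk **pclk;                 /* cached parent clocks */
    };

    static int cache_parents(struct cpu_data *d, int count)
    {
            d->pclk = calloc(count, sizeof(*d->pclk));
            if (!d->pclk)
                    return -1;
            for (int i = 0; i < count; i++)
                    d->pclk[i] = slow_lookup(i);  /* pay the cost once */
            return 0;
    }

    /* The hot path is now a plain array index. */
    static struct clk *pick_parent(struct cpu_data *d, int index)
    {
            return d->pclk[index];
    }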

diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h

@@ -65,7 +65,9 @@ struct cpufreq_policy {
 	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
 						should set cpufreq */
-	unsigned int		cpu;    /* cpu nr of CPU managing this policy */
+	unsigned int		cpu;    /* cpu managing this policy, must be online */
+	unsigned int		kobj_cpu; /* cpu managing sysfs files, can be offline */
+
 	struct clk		*clk;
 	struct cpufreq_cpuinfo	cpuinfo;/* see above */
@@ -80,6 +82,7 @@ struct cpufreq_policy {
 	struct cpufreq_governor	*governor; /* see below */
 	void			*governor_data;
 	bool			governor_enabled; /* governor start/stop flag */
+	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
 
 	struct work_struct	update; /* if update_policy() needs to be
 					 * called, but you're in IRQ context */

diff --git a/include/trace/events/power.h b/include/trace/events/power.h

@@ -42,45 +42,54 @@ TRACE_EVENT(pstate_sample,
 	TP_PROTO(u32 core_busy,
 		u32 scaled_busy,
-		u32 state,
+		u32 from,
+		u32 to,
 		u64 mperf,
 		u64 aperf,
+		u64 tsc,
 		u32 freq
 		),
 
 	TP_ARGS(core_busy,
 		scaled_busy,
-		state,
+		from,
+		to,
 		mperf,
 		aperf,
+		tsc,
 		freq
 		),
 
 	TP_STRUCT__entry(
 		__field(u32, core_busy)
 		__field(u32, scaled_busy)
-		__field(u32, state)
+		__field(u32, from)
+		__field(u32, to)
 		__field(u64, mperf)
 		__field(u64, aperf)
+		__field(u64, tsc)
 		__field(u32, freq)
 		),
 
 	TP_fast_assign(
 		__entry->core_busy = core_busy;
 		__entry->scaled_busy = scaled_busy;
-		__entry->state = state;
+		__entry->from = from;
+		__entry->to = to;
 		__entry->mperf = mperf;
 		__entry->aperf = aperf;
+		__entry->tsc = tsc;
 		__entry->freq = freq;
 		),
 
-	TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
+	TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu ",
 		(unsigned long)__entry->core_busy,
 		(unsigned long)__entry->scaled_busy,
-		(unsigned long)__entry->state,
+		(unsigned long)__entry->from,
+		(unsigned long)__entry->to,
 		(unsigned long long)__entry->mperf,
 		(unsigned long long)__entry->aperf,
+		(unsigned long long)__entry->tsc,
 		(unsigned long)__entry->freq
 		)
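With the reworked event, each sample reports the p-state transition (from/to) plus the raw tsc delta. One way to watch it from user space, sketched in C — paths assume tracefs mounted at /sys/kernel/tracing, root privileges, and a kernel carrying this patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Enable the power:pstate_sample event... */
            int fd = open("/sys/kernel/tracing/events/power/pstate_sample/enable",
                          O_WRONLY);
            if (fd < 0 || write(fd, "1", 1) != 1)
                    return 1;
            close(fd);

            /* ...then stream it; each line carries the from=/to=/tsc=
             * fields per the TP_printk format above. */
            FILE *tp = fopen("/sys/kernel/tracing/trace_pipe", "r");
            if (!tp)
                    return 1;

            char line[512];
            for (int i = 0; i < 20 && fgets(line, sizeof(line), tp); i++)
                    fputs(line, stdout);

            fclose(tp);
            return 0;
    }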