Merge back earlier cpufreq material for v6.2.
commit c7a5518ac3
drivers/cpufreq/acpi-cpufreq.c
@@ -19,6 +19,7 @@
 #include <linux/compiler.h>
 #include <linux/dmi.h>
 #include <linux/slab.h>
+#include <linux/string_helpers.h>
 
 #include <linux/acpi.h>
 #include <linux/io.h>
@@ -135,8 +136,8 @@ static int set_boost(struct cpufreq_policy *policy, int val)
 {
 	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
 			 (void *)(long)val, 1);
-	pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
-		 cpumask_pr_args(policy->cpus), val ? "en" : "dis");
+	pr_debug("CPU %*pbl: Core Boosting %s.\n",
+		 cpumask_pr_args(policy->cpus), str_enabled_disabled(val));
 
 	return 0;
 }
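For reference, str_enabled_disabled() from <linux/string_helpers.h> simply maps a boolean to "enabled"/"disabled", which is why the format string above shrinks from "%sabled" to "%s". A minimal userspace sketch of the same idea (the helper name matches the kernel one, but this is an approximation, not the kernel header):

#include <stdbool.h>
#include <stdio.h>

/* Userspace approximation of str_enabled_disabled() from
 * <linux/string_helpers.h>: map a boolean to "enabled"/"disabled". */
static inline const char *str_enabled_disabled(bool v)
{
	return v ? "enabled" : "disabled";
}

int main(void)
{
	printf("Core Boosting %s.\n", str_enabled_disabled(true));
	printf("Core Boosting %s.\n", str_enabled_disabled(false));
	return 0;
}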
@@ -535,15 +536,6 @@ static void free_acpi_perf_data(void)
 	free_percpu(acpi_perf_data);
 }
 
-static int cpufreq_boost_online(unsigned int cpu)
-{
-	/*
-	 * On the CPU_UP path we simply keep the boost-disable flag
-	 * in sync with the current global state.
-	 */
-	return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
-}
-
 static int cpufreq_boost_down_prep(unsigned int cpu)
 {
 	/*
@@ -897,6 +889,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
 		pr_warn(FW_WARN "P-state 0 is not max freq\n");
 
+	set_boost(policy, acpi_cpufreq_driver.boost_enabled);
+
 	return result;
 
 err_unreg:
@@ -916,6 +910,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 	pr_debug("%s\n", __func__);
 
+	cpufreq_boost_down_prep(policy->cpu);
 	policy->fast_switch_possible = false;
 	policy->driver_data = NULL;
 	acpi_processor_unregister_performance(data->acpi_perf_cpu);
@@ -958,12 +953,8 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 	.attr		= acpi_cpufreq_attr,
 };
 
-static enum cpuhp_state acpi_cpufreq_online;
-
 static void __init acpi_cpufreq_boost_init(void)
 {
-	int ret;
-
 	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
 		pr_debug("Boost capabilities not present in the processor\n");
 		return;
@@ -971,24 +962,6 @@ static void __init acpi_cpufreq_boost_init(void)
 
 	acpi_cpufreq_driver.set_boost = set_boost;
 	acpi_cpufreq_driver.boost_enabled = boost_state(0);
-
-	/*
-	 * This calls the online callback on all online cpu and forces all
-	 * MSRs to the same value.
-	 */
-	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
-				cpufreq_boost_online, cpufreq_boost_down_prep);
-	if (ret < 0) {
-		pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
-		return;
-	}
-	acpi_cpufreq_online = ret;
-}
-
-static void acpi_cpufreq_boost_exit(void)
-{
-	if (acpi_cpufreq_online > 0)
-		cpuhp_remove_state_nocalls(acpi_cpufreq_online);
 }
 
 static int __init acpi_cpufreq_init(void)
@@ -1032,7 +1005,6 @@ static int __init acpi_cpufreq_init(void)
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret) {
 		free_acpi_perf_data();
-		acpi_cpufreq_boost_exit();
 	}
 	return ret;
 }
@@ -1041,8 +1013,6 @@ static void __exit acpi_cpufreq_exit(void)
 {
	pr_debug("%s\n", __func__);
 
-	acpi_cpufreq_boost_exit();
-
 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
 	free_acpi_perf_data();
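Net effect of the acpi-cpufreq hunks above: boost MSRs are no longer kept in sync through a dedicated CPU hotplug state; set_boost() is applied from acpi_cpufreq_cpu_init() and cpufreq_boost_down_prep() from acpi_cpufreq_cpu_exit(), so acpi_cpufreq_online and acpi_cpufreq_boost_exit() can go away. As a reading aid, the boost init routine reconstructed from these hunks (a sketch, not copied from the tree) ends up as:

/* Sketch of acpi_cpufreq_boost_init() after this change, reconstructed
 * from the hunks above: it only advertises the set_boost() callback and
 * records the initial boost state; per-CPU MSR setup now happens in
 * acpi_cpufreq_cpu_init(). */
static void __init acpi_cpufreq_boost_init(void)
{
	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
		pr_debug("Boost capabilities not present in the processor\n");
		return;
	}

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);
}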
drivers/cpufreq/intel_pstate.c
@@ -298,6 +298,7 @@ static int hwp_active __read_mostly;
 static int hwp_mode_bdw __read_mostly;
 static bool per_cpu_limits __read_mostly;
 static bool hwp_boost __read_mostly;
+static bool hwp_forced __read_mostly;
 
 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
 
@@ -1679,12 +1680,12 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
 		return;
 
 	/*
-	 * If powerup EPP is something other than chipset default 0x80 and
-	 * - is more performance oriented than 0x80 (default balance_perf EPP)
+	 * If the EPP is set by firmware, which means that firmware enabled HWP
+	 * - Is equal or less than 0x80 (default balance_perf EPP)
 	 * - But less performance oriented than performance EPP
 	 * then use this as new balance_perf EPP.
 	 */
-	if (cpudata->epp_default < HWP_EPP_BALANCE_PERFORMANCE &&
+	if (hwp_forced && cpudata->epp_default <= HWP_EPP_BALANCE_PERFORMANCE &&
 	    cpudata->epp_default > HWP_EPP_PERFORMANCE) {
 		epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
 		return;
@@ -3384,7 +3385,7 @@ static int __init intel_pstate_init(void)
 
 	id = x86_match_cpu(hwp_support_ids);
 	if (id) {
-		bool hwp_forced = intel_pstate_hwp_is_enabled();
+		hwp_forced = intel_pstate_hwp_is_enabled();
 
 		if (hwp_forced)
 			pr_info("HWP enabled by BIOS\n");
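The two intel_pstate hunks promote hwp_forced from a local in intel_pstate_init() to a file-scope flag, so intel_pstate_update_epp_defaults() can require that firmware actually enabled HWP before adopting the firmware EPP as the balance_performance default. A minimal sketch of the new predicate (the HWP_EPP_* values mirror arch/x86/include/asm/msr-index.h; the helper name is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* EPP register values, as in arch/x86/include/asm/msr-index.h. */
#define HWP_EPP_PERFORMANCE		0x00
#define HWP_EPP_BALANCE_PERFORMANCE	0x80

/* Hypothetical helper mirroring the condition added above: adopt the
 * firmware EPP as the new balance_performance default only when firmware
 * enabled HWP and the value is at or below 0x80, but not full performance. */
static bool adopt_firmware_epp(bool hwp_forced, int epp_default)
{
	return hwp_forced && epp_default <= HWP_EPP_BALANCE_PERFORMANCE &&
	       epp_default > HWP_EPP_PERFORMANCE;
}

int main(void)
{
	printf("%d\n", adopt_firmware_epp(true, 0x40));		/* 1: adopted */
	printf("%d\n", adopt_firmware_epp(true, 0x00));		/* 0: full-performance EPP, keep default */
	printf("%d\n", adopt_firmware_epp(false, 0x40));	/* 0: HWP not enabled by firmware */
	return 0;
}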
drivers/cpufreq/longhaul.c
@@ -407,10 +407,10 @@ static int guess_fsb(int mult)
 {
 	int speed = cpu_khz / 1000;
 	int i;
-	int speeds[] = { 666, 1000, 1333, 2000 };
+	static const int speeds[] = { 666, 1000, 1333, 2000 };
 	int f_max, f_min;
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < ARRAY_SIZE(speeds); i++) {
 		f_max = ((speeds[i] * mult) + 50) / 100;
 		f_max += (ROUNDING / 2);
 		f_min = f_max - ROUNDING;
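The longhaul change is purely cosmetic: the FSB candidate table becomes a single read-only copy and the loop bound is derived from the table instead of the hard-coded 4. A small userspace sketch of the same loop (ARRAY_SIZE spelled out; the multiplier value is an arbitrary example and ROUNDING is left out):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	/* FSB candidates in tenths of MHz, as in guess_fsb() above. */
	static const int speeds[] = { 666, 1000, 1333, 2000 };
	int mult = 55;	/* example multiplier x10, i.e. 5.5x */
	size_t i;

	/* ARRAY_SIZE keeps the loop bound in sync with the table, which is
	 * the point of dropping the hard-coded 4. */
	for (i = 0; i < ARRAY_SIZE(speeds); i++)
		printf("FSB %d.%d MHz -> %d MHz\n", speeds[i] / 10,
		       speeds[i] % 10, (speeds[i] * mult + 50) / 100);
	return 0;
}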
drivers/cpufreq/spear-cpufreq.c
@@ -39,7 +39,7 @@ static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
 	 * In SPEAr1340, cpu clk's parent sys clk can take input from
 	 * following sources
 	 */
-	const char *sys_clk_src[] = {
+	static const char * const sys_clk_src[] = {
 		"sys_syn_clk",
 		"pll1_clk",
 		"pll2_clk",
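Same idiom as the longhaul change: static gives the table a single read-only instance instead of rebuilding it on every call, and the extra const makes the pointer array itself immutable. A standalone sketch of the construct:

#include <stdio.h>

/* Both the strings and the array of pointers are read-only, and "static"
 * means the table is built once rather than on every call, mirroring the
 * spear-cpufreq change above. */
static const char * const sys_clk_src[] = {
	"sys_syn_clk",
	"pll1_clk",
	"pll2_clk",
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(sys_clk_src) / sizeof(sys_clk_src[0]); i++)
		puts(sys_clk_src[i]);
	return 0;
}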