linux/drivers/cpufreq/cppc_cpufreq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* CPPC (Collaborative Processor Performance Control) driver for
* interfacing with the CPUfreq layer and governors. See
* cppc_acpi.c for CPPC specific methods.
*
* (C) Copyright 2014, 2015 Linaro Ltd.
* Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
*/
#define pr_fmt(fmt) "CPPC Cpufreq:" fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <acpi/cppc_acpi.h>
/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED 0x14
/*
* These structs contain information parsed from per-CPU
* ACPI _CPC structures,
* e.g. for each CPU the highest and lowest supported
* performance capabilities, the desired performance level
* requested, etc.
*/
static struct cppc_cpudata **all_cpu_data;
static bool boost_supported;
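/*
* ACPI OEM information used to identify platforms that need the HiSilicon
* get_rate workaround below.
*/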
struct cppc_workaround_oem_info {
char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
};
static bool apply_hisi_workaround;
static struct cppc_workaround_oem_info wa_info[] = {
{
.oem_id = "HISI ",
.oem_table_id = "HIP07 ",
.oem_revision = 0,
}, {
.oem_id = "HISI ",
.oem_table_id = "HIP08 ",
.oem_revision = 0,
}
};
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
unsigned int perf);
/*
* The HiSilicon platform does not support the delivered performance counter
* or the reference performance counter. It calculates the performance using
* a platform-specific mechanism. We reuse the desired performance register
* to store the real performance calculated by the platform.
*/
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
{
struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
u64 desired_perf;
int ret;
ret = cppc_get_desired_perf(cpunum, &desired_perf);
if (ret < 0)
return -EIO;
return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
}
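/*
* Enable the workaround if the PCCT's OEM id, table id and revision match
* one of the affected platforms listed in wa_info[].
*/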
static void cppc_check_hisi_workaround(void)
{
struct acpi_table_header *tbl;
acpi_status status = AE_OK;
int i;
status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
if (ACPI_FAILURE(status) || !tbl)
return;
for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
wa_info[i].oem_revision == tbl->oem_revision) {
apply_hisi_workaround = true;
break;
}
}
acpi_put_table(tbl);
}
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
const u8 *dmi_data = (const u8 *)dm;
u16 *mhz = (u16 *)private;
if (dm->type == DMI_ENTRY_PROCESSOR &&
dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
u16 val = (u16)get_unaligned((const u16 *)
(dmi_data + DMI_PROCESSOR_MAX_SPEED));
*mhz = val > *mhz ? val : *mhz;
}
}
/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
u16 mhz = 0;
dmi_walk(cppc_find_dmi_mhz, &mhz);
/*
* Fall back to a trivially low 1 MHz value, just in case there is no
* actual value set in DMI.
*/
mhz = mhz ? mhz : 1;
return (1000 * mhz);
}
/*
* If the CPPC lowest_freq and nominal_freq registers are exposed, then we
* can use them to convert perf to freq and vice versa.
*
* If the perf/freq point lies between Nominal and Lowest, we treat
* (Lowest perf, Lowest freq) and (Nominal perf, Nominal freq) as two points
* on a line and interpolate between them.
* For perf/freq above Nominal, we use the perf:freq ratio at Nominal for
* the conversion.
*/
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
unsigned int perf)
{
static u64 max_khz;
struct cppc_perf_caps *caps = &cpu->perf_caps;
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
if (perf >= caps->nominal_perf) {
mul = caps->nominal_freq;
div = caps->nominal_perf;
} else {
mul = caps->nominal_freq - caps->lowest_freq;
div = caps->nominal_perf - caps->lowest_perf;
}
} else {
if (!max_khz)
max_khz = cppc_get_dmi_max_khz();
mul = max_khz;
div = cpu->perf_caps.highest_perf;
}
return (u64)perf * mul / div;
}
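/*
* Inverse of the conversion above: map a frequency in KHz back to an
* abstract CPPC performance value using the same two reference points,
* or the DMI-derived maximum frequency as a fallback.
*/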
static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
unsigned int freq)
{
static u64 max_khz;
struct cppc_perf_caps *caps = &cpu->perf_caps;
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
if (freq >= caps->nominal_freq) {
mul = caps->nominal_perf;
div = caps->nominal_freq;
} else {
mul = caps->lowest_perf;
div = caps->lowest_freq;
}
} else {
if (!max_khz)
max_khz = cppc_get_dmi_max_khz();
mul = cpu->perf_caps.highest_perf;
div = max_khz;
}
return (u64)freq * mul / div;
}
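/*
* cpufreq ->target() callback: convert the requested frequency to a desired
* performance value and program it through the CPPC perf controls.
*/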
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cppc_cpudata *cpu;
struct cpufreq_freqs freqs;
u32 desired_perf;
int ret = 0;
cpu = all_cpu_data[policy->cpu];
desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
/* Return if it is exactly the same perf */
if (desired_perf == cpu->perf_ctrls.desired_perf)
return ret;
cpu->perf_ctrls.desired_perf = desired_perf;
freqs.old = policy->cur;
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
cpufreq_freq_transition_end(policy, &freqs, ret != 0);
if (ret)
pr_debug("Failed to set target on CPU:%d. ret:%d\n",
cpu->cpu, ret);
return ret;
}
static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
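/*
* Request the lowest supported performance level for a CPU whose policy is
* being stopped (e.g. on CPU offline).
*/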
static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
int cpu_num = policy->cpu;
struct cppc_cpudata *cpu = all_cpu_data[cpu_num];
int ret;
cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;
ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
if (ret)
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
cpu->perf_caps.lowest_perf, cpu_num, ret);
}
/*
* The PCC subspace describes the rate at which the platform can accept
* commands on the shared PCC channel (including READs, which do not count
* towards frequency transition requests), so ideally we need to use the PCC
* values only as a fallback if we don't have a platform-specific
* transition_delay_us.
*/
#ifdef CONFIG_ARM64
#include <asm/cputype.h>
static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
unsigned long implementor = read_cpuid_implementor();
unsigned long part_num = read_cpuid_part_number();
unsigned int delay_us = 0;
switch (implementor) {
case ARM_CPU_IMP_QCOM:
switch (part_num) {
case QCOM_CPU_PART_FALKOR_V1:
case QCOM_CPU_PART_FALKOR:
delay_us = 10000;
break;
default:
delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
break;
}
break;
default:
delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
break;
}
return delay_us;
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif
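/*
* cpufreq ->init() callback: read the CPU's CPPC performance capabilities,
* derive the policy frequency limits from them, and start the CPU at its
* highest performance level.
*/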
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu;
unsigned int cpu_num = policy->cpu;
int ret = 0;
cpu = all_cpu_data[policy->cpu];
cpu->cpu = cpu_num;
ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps);
if (ret) {
pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
cpu_num, ret);
return ret;
}
/* Convert the lowest and nominal freq from MHz to KHz */
cpu->perf_caps.lowest_freq *= 1000;
cpu->perf_caps.nominal_freq *= 1000;
/*
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
* available if userspace wants to use any perf between lowest & lowest
* nonlinear perf
*/
policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
policy->shared_type = cpu->shared_type;
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
int i;
cpumask_copy(policy->cpus, cpu->shared_cpu_map);
for_each_cpu(i, policy->cpus) {
if (unlikely(i == policy->cpu))
continue;
memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
sizeof(cpu->perf_caps));
}
} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
/* Support only SW_ANY for now. */
pr_debug("Unsupported CPU co-ord type\n");
return -EFAULT;
}
cpu->cur_policy = policy;
/*
* If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
* is supported.
*/
if (cpu->perf_caps.highest_perf > cpu->perf_caps.nominal_perf)
boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
policy->cur = cppc_cpufreq_perf_to_khz(cpu,
cpu->perf_caps.highest_perf);
cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;
ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
if (ret)
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
cpu->perf_caps.highest_perf, cpu_num, ret);
return ret;
}
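/*
* Feedback counters may be only 32 bits wide; if t0 fits in 32 bits and
* t1 is not greater than t0, assume a 32-bit wraparound and compute the
* delta in 32-bit arithmetic.
*/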
static inline u64 get_delta(u64 t1, u64 t0)
{
if (t1 > t0 || t0 > ~(u32)0)
return t1 - t0;
return (u32)t1 - (u32)t0;
}
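/*
* Compute the delivered performance from two snapshots of the reference and
* delivered feedback counters and convert it to a frequency in KHz.
*/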
static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
struct cppc_perf_fb_ctrs fb_ctrs_t0,
struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
u64 reference_perf, delivered_perf;
reference_perf = fb_ctrs_t0.reference_perf;
delta_reference = get_delta(fb_ctrs_t1.reference,
fb_ctrs_t0.reference);
delta_delivered = get_delta(fb_ctrs_t1.delivered,
fb_ctrs_t0.delivered);
/* Check to avoid divide-by-zero */
if (delta_reference || delta_delivered)
delivered_perf = (reference_perf * delta_delivered) /
delta_reference;
else
delivered_perf = cpu->perf_ctrls.desired_perf;
return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
}
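/*
* cpufreq ->get() callback: sample the feedback counters twice, 2 usec
* apart, and report the average delivered frequency over that window.
*/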
static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
struct cppc_cpudata *cpu = all_cpu_data[cpunum];
int ret;
if (apply_hisi_workaround)
return hisi_cppc_cpufreq_get_rate(cpunum);
ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
if (ret)
return ret;
udelay(2); /* 2usec delay between sampling */
ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1);
if (ret)
return ret;
return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
}
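/*
* cpufreq ->set_boost() callback: raise the policy maximum to the highest
* (boost) performance level, or restore it to nominal when boost is
* disabled, and update the max-frequency QoS request accordingly.
*/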
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
struct cppc_cpudata *cpudata;
int ret;
if (!boost_supported) {
pr_err("BOOST not supported by CPU or firmware\n");
return -EINVAL;
}
cpudata = all_cpu_data[policy->cpu];
if (state)
policy->max = cppc_cpufreq_perf_to_khz(cpudata,
cpudata->perf_caps.highest_perf);
else
policy->max = cppc_cpufreq_perf_to_khz(cpudata,
cpudata->perf_caps.nominal_perf);
policy->cpuinfo.max_freq = policy->max;
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
return ret;
return 0;
}
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
.get = cppc_cpufreq_get_rate,
.init = cppc_cpufreq_cpu_init,
.stop_cpu = cppc_cpufreq_stop_cpu,
.set_boost = cppc_cpufreq_set_boost,
.name = "cppc_cpufreq",
};
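/*
* Module init: allocate per-CPU data, parse the ACPI _PSD coordination
* information, apply any platform workarounds and register the driver.
*/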
static int __init cppc_cpufreq_init(void)
{
int i, ret = 0;
struct cppc_cpudata *cpu;
if (acpi_disabled)
return -ENODEV;
all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
GFP_KERNEL);
if (!all_cpu_data)
return -ENOMEM;
for_each_possible_cpu(i) {
all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
if (!all_cpu_data[i])
goto out;
cpu = all_cpu_data[i];
if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL))
goto out;
}
ret = acpi_get_psd_map(all_cpu_data);
if (ret) {
pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
goto out;
}
cppc_check_hisi_workaround();
ret = cpufreq_register_driver(&cppc_cpufreq_driver);
if (ret)
goto out;
return ret;
out:
for_each_possible_cpu(i) {
cpu = all_cpu_data[i];
if (!cpu)
break;
free_cpumask_var(cpu->shared_cpu_map);
kfree(cpu);
}
kfree(all_cpu_data);
return -ENODEV;
}
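/* Module exit: unregister the driver and free all per-CPU data. */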
static void __exit cppc_cpufreq_exit(void)
{
struct cppc_cpudata *cpu;
int i;
cpufreq_unregister_driver(&cppc_cpufreq_driver);
for_each_possible_cpu(i) {
cpu = all_cpu_data[i];
free_cpumask_var(cpu->shared_cpu_map);
kfree(cpu);
}
kfree(all_cpu_data);
}
module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");
late_initcall(cppc_cpufreq_init);
static const struct acpi_device_id cppc_acpi_ids[] __used = {
{ACPI_PROCESSOR_DEVICE_HID, },
{}
};
MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);