/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * CPPC (Collaborative Processor Performance Control) methods used
 * by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#ifndef _CPPC_ACPI_H
#define _CPPC_ACPI_H

#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/types.h>

#include <acpi/pcc.h>
#include <acpi/processor.h>

/* Support CPPCv2 and CPPCv3 */
#define CPPC_V2_REV	2
#define CPPC_V3_REV	3
#define CPPC_V2_NUM_ENT	21
#define CPPC_V3_NUM_ENT	23

#define PCC_CMD_COMPLETE_MASK	(1 << 0)
#define PCC_ERROR_MASK		(1 << 2)

#define MAX_CPC_REG_ENT	21

/* CPPC specific PCC commands. */
#define CMD_READ 0
#define CMD_WRITE 1
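
/*
 * Illustrative sketch (an assumption about typical PCC usage, not an
 * interface defined in this header): after a CMD_READ/CMD_WRITE doorbell
 * is rung, the caller normally polls the PCC shared-memory status word
 * and checks the bits above, roughly:
 *
 *	u16 status = readw_relaxed(&pcc_comm_addr->status);
 *	bool ok = (status & PCC_CMD_COMPLETE_MASK) &&
 *		  !(status & PCC_ERROR_MASK);
 *
 * where pcc_comm_addr is a hypothetical pointer to the mapped PCC
 * subspace shared memory (struct acpi_pcct_shared_memory).
 */
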
/* Each register has the following format. */
struct cpc_reg {
	u8 descriptor;
	u16 length;
	u8 space_id;
	u8 bit_width;
	u8 bit_offset;
	u8 access_width;
	u64 address;
} __packed;

/*
 * Each entry in the CPC table is either
 * of type ACPI_TYPE_BUFFER or
 * ACPI_TYPE_INTEGER.
 */
struct cpc_register_resource {
	acpi_object_type type;
	u64 __iomem *sys_mem_vaddr;
	union {
		struct cpc_reg reg;
		u64 int_value;
	} cpc_entry;
};

/* Container to hold the CPC details for each CPU */
struct cpc_desc {
	int num_entries;
	int version;
	int cpu_id;
	int write_cmd_status;
	int write_cmd_id;
	struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
	struct acpi_psd_package domain_info;
	struct kobject kobj;
};

/* These are indexes into the per-cpu cpc_regs[]. Order is important. */
enum cppc_regs {
	HIGHEST_PERF,
	NOMINAL_PERF,
	LOW_NON_LINEAR_PERF,
	LOWEST_PERF,
	GUARANTEED_PERF,
	DESIRED_PERF,
	MIN_PERF,
	MAX_PERF,
	PERF_REDUC_TOLERANCE,
	TIME_WINDOW,
	CTR_WRAP_TIME,
	REFERENCE_CTR,
	DELIVERED_CTR,
	PERF_LIMITED,
	ENABLE,
	AUTO_SEL_ENABLE,
	AUTO_ACT_WINDOW,
	ENERGY_PERF,
	REFERENCE_PERF,
	LOWEST_FREQ,
	NOMINAL_FREQ,
};
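
/*
 * Illustrative sketch (not a declaration from this header): the values above
 * are meant to be used as array indexes, so looking up, say, the desired
 * performance register of a CPU would be roughly:
 *
 *	struct cpc_register_resource *desired_reg =
 *			&cpc_desc_ptr->cpc_regs[DESIRED_PERF];
 *
 * where cpc_desc_ptr is a hypothetical pointer to that CPU's struct cpc_desc.
 */
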
/*
 * Categorization of registers as described
 * in the ACPI v5.1 spec.
 * XXX: Only filling up ones which are used by governors
 * today.
 */
struct cppc_perf_caps {
	u32 guaranteed_perf;
	u32 highest_perf;
	u32 nominal_perf;
	u32 lowest_perf;
	u32 lowest_nonlinear_perf;
	u32 lowest_freq;
	u32 nominal_freq;
};

struct cppc_perf_ctrls {
	u32 max_perf;
	u32 min_perf;
	u32 desired_perf;
};

struct cppc_perf_fb_ctrs {
	u64 reference;
	u64 delivered;
	u64 reference_perf;
	u64 wraparound_time;
};
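
/*
 * Illustrative sketch (an assumption about how the feedback counters are
 * consumed, not an interface defined here): average delivered performance
 * over a sampling window can be estimated from two snapshots t0 and t1 as
 *
 *	delivered_perf = reference_perf *
 *			 (t1.delivered - t0.delivered) /
 *			 (t1.reference - t0.reference);
 *
 * provided the reference-counter delta is non-zero; counter wraparound
 * within wraparound_time also needs to be handled.
 */
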
/* Per CPU container for runtime CPPC management. */
struct cppc_cpudata {
	struct list_head node;
	struct cppc_perf_caps perf_caps;
	struct cppc_perf_ctrls perf_ctrls;
	struct cppc_perf_fb_ctrs perf_fb_ctrs;
	unsigned int shared_type;
	cpumask_var_t shared_cpu_map;
};
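
/*
 * Illustrative sketch (an assumption about typical cpufreq driver usage, not
 * part of this header): one cppc_cpudata is usually allocated per policy at
 * init time, the _PSD sharing information resolved and the capabilities read
 * once, roughly:
 *
 *	struct cppc_cpudata *cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
 *
 *	if (cpu_data &&
 *	    zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL) &&
 *	    !acpi_get_psd_map(cpu, cpu_data) &&
 *	    !cppc_get_perf_caps(cpu, &cpu_data->perf_caps))
 *		policy->driver_data = cpu_data;
 *
 * where cpu and policy are the hypothetical CPU number and cpufreq policy
 * being initialised (error unwinding omitted for brevity).
 */
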
#ifdef CONFIG_ACPI_CPPC_LIB
extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern bool acpi_cpc_valid(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
extern unsigned int cppc_get_transition_latency(int cpu);
extern bool cpc_ffh_supported(void);
extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
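
/*
 * Illustrative sketch (an assumption about typical use of the interfaces
 * above, not a definition from this header): reading the capabilities and
 * requesting nominal performance for a CPU might look roughly like:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = { 0 };
 *	int ret = cppc_get_perf_caps(cpu, &caps);
 *
 *	if (!ret) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		ret = cppc_set_perf(cpu, &ctrls);
 *	}
 *
 * where cpu is the hypothetical logical CPU number of interest.
 */
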
#else /* !CONFIG_ACPI_CPPC_LIB */
static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return -ENOTSUPP;
}

static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return -ENOTSUPP;
}

static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	return -ENOTSUPP;
}

static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	return -ENOTSUPP;
}

static inline int cppc_set_enable(int cpu, bool enable)
{
	return -ENOTSUPP;
}

static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
	return -ENOTSUPP;
}

static inline bool acpi_cpc_valid(void)
{
	return false;
}

static inline unsigned int cppc_get_transition_latency(int cpu)
{
	return CPUFREQ_ETERNAL;
}

static inline bool cpc_ffh_supported(void)
{
	return false;
}

static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}
#endif /* !CONFIG_ACPI_CPPC_LIB */

#endif /* _CPPC_ACPI_H */