2019-05-27 09:55:06 +03:00
// SPDX-License-Identifier: GPL-2.0-or-later
2005-04-17 02:20:36 +04:00
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 */
# include <linux/kernel.h>
# include <linux/module.h>
# include <linux/init.h>
# include <linux/cpufreq.h>
2013-12-03 04:49:16 +04:00
# include <linux/acpi.h>
2005-04-17 02:20:36 +04:00
# include <acpi/processor.h>
2016-12-24 22:46:01 +03:00
# include <linux/uaccess.h>
2005-04-17 02:20:36 +04:00
2009-07-29 00:45:54 +04:00
# define PREFIX "ACPI: "
2005-04-17 02:20:36 +04:00
# define ACPI_PROCESSOR_CLASS "processor"
# define _COMPONENT ACPI_PROCESSOR_COMPONENT
2007-02-13 06:42:12 +03:00
ACPI_MODULE_NAME ( " processor_thermal " ) ;
2005-04-17 02:20:36 +04:00
# ifdef CONFIG_CPU_FREQ
/*
 * If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */
2008-01-17 10:51:23 +03:00
# define CPUFREQ_THERMAL_MIN_STEP 0
# define CPUFREQ_THERMAL_MAX_STEP 3
2008-03-05 19:31:29 +03:00
static DEFINE_PER_CPU ( unsigned int , cpufreq_thermal_reduction_pctg ) ;
2005-04-17 02:20:36 +04:00
static unsigned int acpi_thermal_cpufreq_is_init = 0 ;
2012-02-06 20:17:11 +04:00
# define reduction_pctg(cpu) \
per_cpu ( cpufreq_thermal_reduction_pctg , phys_package_first_cpu ( cpu ) )
/*
 * Emulate "per package data" using per-CPU data (which should really be
 * provided elsewhere).
 *
 * Note we can lose a CPU on CPU hot-unplug; in this case we forget the state
 * temporarily. Fortunately that's not a big issue here (I hope).
 */
static int phys_package_first_cpu ( int cpu )
{
int i ;
int id = topology_physical_package_id ( cpu ) ;
for_each_online_cpu ( i )
if ( topology_physical_package_id ( i ) = = id )
return i ;
return 0 ;
}
2005-04-17 02:20:36 +04:00
static int cpu_has_cpufreq ( unsigned int cpu )
{
struct cpufreq_policy policy ;
2004-09-16 19:07:00 +04:00
if ( ! acpi_thermal_cpufreq_is_init | | cpufreq_get_policy ( & policy , cpu ) )
2005-12-21 09:29:00 +03:00
return 0 ;
return 1 ;
2005-04-17 02:20:36 +04:00
}
2005-08-05 08:44:28 +04:00
static int acpi_thermal_cpufreq_notifier ( struct notifier_block * nb ,
unsigned long event , void * data )
2005-04-17 02:20:36 +04:00
{
struct cpufreq_policy * policy = data ;
unsigned long max_freq = 0 ;
if ( event ! = CPUFREQ_ADJUST )
goto out ;
2008-03-05 19:31:29 +03:00
max_freq = (
policy - > cpuinfo . max_freq *
2012-02-06 20:17:11 +04:00
( 100 - reduction_pctg ( policy - > cpu ) * 20 )
2008-03-05 19:31:29 +03:00
) / 100 ;
2005-04-17 02:20:36 +04:00
cpufreq_verify_within_limits ( policy , 0 , max_freq ) ;
2005-08-05 08:44:28 +04:00
out :
2005-04-17 02:20:36 +04:00
return 0 ;
}
/* Registered/unregistered by acpi_thermal_cpufreq_init()/_exit(). */
static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
	.notifier_call = acpi_thermal_cpufreq_notifier,
};
2008-01-17 10:51:23 +03:00
/* Maximum thermal reduction step reachable via cpufreq for @cpu. */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return cpu_has_cpufreq(cpu) ? CPUFREQ_THERMAL_MAX_STEP : 0;
}
/* Current thermal reduction step of @cpu's package (0 without cpufreq). */
static int cpufreq_get_cur_state(unsigned int cpu)
{
	return cpu_has_cpufreq(cpu) ? reduction_pctg(cpu) : 0;
}
static int cpufreq_set_cur_state ( unsigned int cpu , int state )
{
2012-02-06 20:17:11 +04:00
int i ;
2008-01-17 10:51:23 +03:00
if ( ! cpu_has_cpufreq ( cpu ) )
return 0 ;
2012-02-06 20:17:11 +04:00
reduction_pctg ( cpu ) = state ;
/*
* Update all the CPUs in the same package because they all
* contribute to the temperature and often share the same
* frequency .
*/
for_each_online_cpu ( i ) {
if ( topology_physical_package_id ( i ) = =
topology_physical_package_id ( cpu ) )
cpufreq_update_policy ( i ) ;
}
2008-01-17 10:51:23 +03:00
return 0 ;
}
2005-08-05 08:44:28 +04:00
void acpi_thermal_cpufreq_init ( void )
{
2005-04-17 02:20:36 +04:00
int i ;
2005-08-05 08:44:28 +04:00
i = cpufreq_register_notifier ( & acpi_thermal_cpufreq_notifier_block ,
CPUFREQ_POLICY_NOTIFIER ) ;
2005-04-17 02:20:36 +04:00
if ( ! i )
acpi_thermal_cpufreq_is_init = 1 ;
}
2005-08-05 08:44:28 +04:00
void acpi_thermal_cpufreq_exit ( void )
{
2005-04-17 02:20:36 +04:00
if ( acpi_thermal_cpufreq_is_init )
2005-08-05 08:44:28 +04:00
cpufreq_unregister_notifier
( & acpi_thermal_cpufreq_notifier_block ,
CPUFREQ_POLICY_NOTIFIER ) ;
2005-04-17 02:20:36 +04:00
acpi_thermal_cpufreq_is_init = 0 ;
}
2005-08-05 08:44:28 +04:00
# else /* ! CONFIG_CPU_FREQ */
2008-01-17 10:51:23 +03:00
/* Without CONFIG_CPU_FREQ, cpufreq-based cooling offers no states. */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}
/* Without CONFIG_CPU_FREQ, the cpufreq cooling state is always 0. */
static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}
/* Without CONFIG_CPU_FREQ, setting a cpufreq cooling state is a no-op. */
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}
2005-04-17 02:20:36 +04:00
# endif
2013-12-05 22:14:16 +04:00
/* thermal cooling device callbacks */
2008-01-17 10:51:23 +03:00
static int acpi_processor_max_state ( struct acpi_processor * pr )
{
int max_state = 0 ;
/*
* There exists four states according to
2013-12-05 22:14:16 +04:00
* cpufreq_thermal_reduction_pctg . 0 , 1 , 2 , 3
2008-01-17 10:51:23 +03:00
*/
max_state + = cpufreq_get_max_state ( pr - > id ) ;
if ( pr - > flags . throttling )
max_state + = ( pr - > throttling . state_count - 1 ) ;
return max_state ;
}
static int
2008-11-27 20:48:13 +03:00
processor_get_max_state ( struct thermal_cooling_device * cdev ,
unsigned long * state )
2008-01-17 10:51:23 +03:00
{
struct acpi_device * device = cdev - > devdata ;
2013-03-25 14:50:06 +04:00
struct acpi_processor * pr ;
2008-01-17 10:51:23 +03:00
2013-03-25 14:50:06 +04:00
if ( ! device )
return - EINVAL ;
pr = acpi_driver_data ( device ) ;
if ( ! pr )
2008-01-17 10:51:23 +03:00
return - EINVAL ;
2008-11-27 20:48:13 +03:00
* state = acpi_processor_max_state ( pr ) ;
return 0 ;
2008-01-17 10:51:23 +03:00
}
static int
2008-11-27 20:48:13 +03:00
processor_get_cur_state ( struct thermal_cooling_device * cdev ,
unsigned long * cur_state )
2008-01-17 10:51:23 +03:00
{
struct acpi_device * device = cdev - > devdata ;
2013-03-25 14:50:06 +04:00
struct acpi_processor * pr ;
2008-01-17 10:51:23 +03:00
2013-03-25 14:50:06 +04:00
if ( ! device )
return - EINVAL ;
pr = acpi_driver_data ( device ) ;
if ( ! pr )
2008-01-17 10:51:23 +03:00
return - EINVAL ;
2008-11-27 20:48:13 +03:00
* cur_state = cpufreq_get_cur_state ( pr - > id ) ;
2008-01-17 10:51:23 +03:00
if ( pr - > flags . throttling )
2008-11-27 20:48:13 +03:00
* cur_state + = pr - > throttling . state ;
return 0 ;
2008-01-17 10:51:23 +03:00
}
static int
2008-11-27 20:48:13 +03:00
processor_set_cur_state ( struct thermal_cooling_device * cdev ,
unsigned long state )
2008-01-17 10:51:23 +03:00
{
struct acpi_device * device = cdev - > devdata ;
2013-03-25 14:50:06 +04:00
struct acpi_processor * pr ;
2008-01-17 10:51:23 +03:00
int result = 0 ;
int max_pstate ;
2013-03-25 14:50:06 +04:00
if ( ! device )
return - EINVAL ;
pr = acpi_driver_data ( device ) ;
if ( ! pr )
2008-01-17 10:51:23 +03:00
return - EINVAL ;
max_pstate = cpufreq_get_max_state ( pr - > id ) ;
if ( state > acpi_processor_max_state ( pr ) )
return - EINVAL ;
if ( state < = max_pstate ) {
if ( pr - > flags . throttling & & pr - > throttling . state )
2009-08-27 01:29:29 +04:00
result = acpi_processor_set_throttling ( pr , 0 , false ) ;
2008-01-17 10:51:23 +03:00
cpufreq_set_cur_state ( pr - > id , state ) ;
} else {
cpufreq_set_cur_state ( pr - > id , max_pstate ) ;
result = acpi_processor_set_throttling ( pr ,
2009-08-27 01:29:29 +04:00
state - max_pstate , false ) ;
2008-01-17 10:51:23 +03:00
}
return result ;
}
2011-06-25 21:07:52 +04:00
const struct thermal_cooling_device_ops processor_cooling_ops = {
2008-01-17 10:51:23 +03:00
. get_max_state = processor_get_max_state ,
. get_cur_state = processor_get_cur_state ,
. set_cur_state = processor_set_cur_state ,
} ;