// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as against a discretized
 * P-state scale which is tied to CPU frequency only. In brief, the basic
 * operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize request within requested bounds
 *   depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called (PCC) Platform Communication Channel. This is a generic mailbox-like
 * mechanism which includes doorbell semantics to indicate register updates.
 * See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf):
	 * take the read lock and check that the channel belongs to OSPM
	 * before reading from or writing to the PCC subspace, and take the
	 * write lock before transferring channel ownership to the platform
	 * via the doorbell. This allows a number of CPPC requests to be
	 * batched if they happen to originate at about the same time.
	 *
	 * For non-performance-critical use cases (init): take the write lock
	 * for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
						0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
						ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))

/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,	\
					struct attribute *attr, char *buf) \
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};

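/*
 * check_pcc_chan() - wait for the platform to hand the PCC channel back
 * @pcc_ss_id: PCC subspace ID whose status register is polled.
 * @chk_err_bit: if true, also report an error when the PCC error bit is set.
 *
 * Returns 0 once OSPM owns the channel again, or a negative errno on
 * timeout or error.
 */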
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us (delay_us) for a maximum of
	 * deadline_us (timeout_us) until the PCC command complete bit is set (cond).
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding the write_lock (pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds."
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* Wait for completion and check for the PCC error bit. */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

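/* Mailbox framework callback invoked when a PCC doorbell TX completes. */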
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

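/*
 * Evaluate the optional _PSD method for this CPU and, if present, extract
 * its P-state dependency domain information into cpc_ptr->domain_info.
 */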
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pr = all_cpu_data[i];
		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_cpu(j, pr->shared_cpu_map) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}
	goto out;

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];

		/* Assume no coordination on any error parsing domain info */
		cpumask_clear(pr->shared_cpu_map);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	}
out:
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

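/*
 * Request the PCC mailbox channel for the given subspace, map its shared
 * communication region and cache the PCCT timing parameters (latency,
 * MRTT, MPAR) used when issuing commands.
 */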
static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for %d CPPC\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/* Check if CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			 revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			 num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *			{
 *			17,
 *			NumEntries
 *			1,
 *			// Revision
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 *			// Highest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 *			// Nominal Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 *			// Lowest Nonlinear Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 *			// Lowest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 *			// Guaranteed Performance Register
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 *			// Desired Performance Register
 *			ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *			..
 *			..
 *			..
 *
 *		}
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,
 *		AddressSpaceKeyword
 *		8,
 *		//RegisterBitWidth
 *		8,
 *		//RegisterBitOffset
 *		0x30,
 *		//RegisterAddress
 *		9,
 *		//AccessSize (subspace ID)
 *		0,
 *		)
 *	}
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->version = cpc_rev;

	if (!is_cppc_supported(cpc_rev, num_ent))
		goto out_free;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SYS MEM and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
 * as fast as possible. We have already mapped the PCC subspace during init, so
 * we can directly write to it.
 */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}

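/* Mirror of cpc_read() for the write direction; address space handling is identical. */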
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_desired_perf - Get the value of desired performance register.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: address of a variable to store the returned desired performance
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cpc_register_resource *desired_reg;
	struct cppc_pcc_data *pcc_ss_data = NULL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	if (CPC_IN_PCC(desired_reg)) {
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, desired_reg, desired_perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, desired_reg, desired_perf);

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if ctr_wrap_time optional register is unsupported, then the
	 * performance counters are assumed to never wrap during the lifetime of
	 * platform
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: Basically if we think of a group of cppc_set_perf
	 * requests that happened in short overlapping interval. The last CPU to
	 * come out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at-least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (other CPU acquiring the write_lock) couldn't have happened
	 * before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 * down_write, in which case, send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
	 * So this CPU can be certain that its request will be delivered
	 *    So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario
	 * The thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to Platform by triggering the
	 * doorbell and transferred the ownership of PCC to platform. So this
	 * avoids triggering an unnecessary doorbell and more importantly before
	 * triggering the doorbell it makes sure that the PCC channel ownership
	 * is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

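/*
 * Minimal usage sketch (illustrative only, not taken from an in-tree driver):
 * a cpufreq-style caller would typically read the capabilities once and then
 * request a desired performance level within them, e.g.:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */
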
/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values
	 * Below are definition from ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
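
	/*
	 * Worked example with hypothetical numbers: a pcc_mpar of 600
	 * commands per minute makes the MPAR bound below come out to
	 * 60 * (10^9 / 600) ns, i.e. roughly one request per 100 ms.
	 */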
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);