// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/cpufreq/cpufreq_stats.c
 *
 *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>

struct cpufreq_stats {
	unsigned int total_trans;		/* total number of frequency transitions */
	unsigned long long last_time;		/* local_clock() timestamp of last update (ns) */
	unsigned int max_state;			/* number of entries allocated per table */
	unsigned int state_num;			/* number of valid, unique frequencies */
	unsigned int last_index;		/* index of the current frequency */
	u64 *time_in_state;			/* per-frequency residency, in ns */
	unsigned int *freq_table;		/* frequencies, in kHz */
	unsigned int *trans_table;		/* max_state x max_state transition counts */

	/* Deferred reset */
	unsigned int reset_pending;
	unsigned long long reset_time;
};
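
/*
 * Accumulate the time elapsed since @time (a local_clock() timestamp in ns)
 * into the bucket of the currently active frequency and refresh last_time.
 */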
static void cpufreq_stats_update(struct cpufreq_stats *stats,
				 unsigned long long time)
{
	unsigned long long cur_time = local_clock();

	stats->time_in_state[stats->last_index] += cur_time - time;
	stats->last_time = cur_time;
}
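
/*
 * Perform a reset that was previously requested via the "reset" attribute:
 * zero all counters and restart time accounting from the moment the reset
 * was requested (reset_time), not from the moment it is carried out.
 */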
static void cpufreq_stats_reset_table(struct cpufreq_stats *stats)
{
	unsigned int count = stats->max_state;

	memset(stats->time_in_state, 0, count * sizeof(u64));
	memset(stats->trans_table, 0, count * count * sizeof(int));
	stats->last_time = local_clock();
	stats->total_trans = 0;

	/* Adjust for the time elapsed since reset was requested */
	WRITE_ONCE(stats->reset_pending, 0);
	/*
	 * Prevent the reset_time read from being reordered before the
	 * reset_pending accesses in cpufreq_stats_record_transition().
	 */
	smp_rmb();
	cpufreq_stats_update(stats, READ_ONCE(stats->reset_time));
}
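
/*
 * The attributes below are exported through the "stats" sysfs group attached
 * to each policy, typically visible as
 * /sys/devices/system/cpu/cpuX/cpufreq/stats/ (exact path may vary by
 * platform and kernel configuration).
 */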
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;

	if (READ_ONCE(stats->reset_pending))
		return sprintf(buf, "%d\n", 0);
	else
		return sprintf(buf, "%u\n", stats->total_trans);
}
cpufreq_freq_attr_ro(total_trans);
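
/*
 * time_in_state prints one "<frequency in kHz> <time>" pair per line, with
 * the time converted by nsec_to_clock_t() (i.e. USER_HZ ticks), e.g.:
 *
 *   2101000 2505
 *   1600000 1940
 *
 * (values above are illustrative only)
 */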
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;
	bool pending = READ_ONCE(stats->reset_pending);
	unsigned long long time;
	ssize_t len = 0;
	int i;

	for (i = 0; i < stats->state_num; i++) {
		if (pending) {
			if (i == stats->last_index) {
				/*
				 * Prevent the reset_time read from occurring
				 * before the reset_pending read above.
				 */
				smp_rmb();
				time = local_clock() - READ_ONCE(stats->reset_time);
			} else {
				time = 0;
			}
		} else {
			time = stats->time_in_state[i];
			if (i == stats->last_index)
				time += local_clock() - stats->last_time;
		}

		len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
			       nsec_to_clock_t(time));
	}
	return len;
}
cpufreq_freq_attr_ro(time_in_state);
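
/*
 * Writing anything to the "reset" attribute schedules a deferred reset of the
 * statistics, for example (assuming the typical sysfs layout):
 *
 *   echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/stats/reset
 */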
/* We don't care what is written to the attribute */
static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
			   size_t count)
{
	struct cpufreq_stats *stats = policy->stats;

	/*
	 * Defer resetting of stats to cpufreq_stats_record_transition() to
	 * avoid races.
	 */
	WRITE_ONCE(stats->reset_time, local_clock());
	/*
	 * The memory barrier below is to prevent the readers of reset_time
	 * from seeing a stale or partially updated value.
	 */
	smp_wmb();
	WRITE_ONCE(stats->reset_pending, 1);

	return count;
}
cpufreq_freq_attr_wo(reset);
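
/*
 * trans_table prints a matrix of transition counts; row i, column j holds the
 * number of transitions from freq_table[i] to freq_table[j]. The output is
 * truncated (and the attribute effectively disabled) if it would not fit in
 * one page.
 */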
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;
	bool pending = READ_ONCE(stats->reset_pending);
	ssize_t len = 0;
	int i, j, count;

	len += scnprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += scnprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stats->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stats->freq_table[i]);

		for (j = 0; j < stats->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;

			if (pending)
				count = 0;
			else
				count = stats->trans_table[i * stats->max_state + j];

			len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
		}
		if (len >= PAGE_SIZE)
			break;
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	}

	if (len >= PAGE_SIZE) {
		pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
		return -EFBIG;
	}
	return len;
}
cpufreq_freq_attr_ro(trans_table);

static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
	&reset.attr,
	&trans_table.attr,
	NULL
};
static const struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};
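
/*
 * Return the index of @freq in stats->freq_table, or -1 if the frequency is
 * not present.
 */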
static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
{
	int index;

	for (index = 0; index < stats->max_state; index++)
		if (stats->freq_table[index] == freq)
			return index;
	return -1;
}
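
/*
 * Tear down the sysfs group and free the statistics of @policy; safe to call
 * if the table was never created or has already been freed.
 */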
void cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stats = policy->stats;

	/* Already freed */
	if (!stats)
		return;

	pr_debug("%s: Free stats table\n", __func__);

	sysfs_remove_group(&policy->kobj, &stats_attr_group);
	kfree(stats->time_in_state);
	kfree(stats);
	policy->stats = NULL;
}
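
/*
 * Allocate and initialize the statistics for @policy. The three tables share
 * a single allocation laid out as: time_in_state (count u64 entries),
 * freq_table (count unsigned ints) and trans_table (count * count unsigned
 * ints), in that order.
 */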
void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
	unsigned int i = 0, count;
	struct cpufreq_stats *stats;
	unsigned int alloc_size;
	struct cpufreq_frequency_table *pos;

	count = cpufreq_table_count_valid_entries(policy);
	if (!count)
		return;

	/* stats already initialized */
	if (policy->stats)
		return;

	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return;

	alloc_size = count * sizeof(int) + count * sizeof(u64);

	alloc_size += count * count * sizeof(int);

	/* Allocate memory for time_in_state/freq_table/trans_table in one go */
	stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stats->time_in_state)
		goto free_stat;

	stats->freq_table = (unsigned int *)(stats->time_in_state + count);

	stats->trans_table = stats->freq_table + count;

	stats->max_state = count;

	/* Find valid-unique entries */
	cpufreq_for_each_valid_entry(pos, policy->freq_table)
		if (freq_table_get_index(stats, pos->frequency) == -1)
			stats->freq_table[i++] = pos->frequency;

	stats->state_num = i;
	stats->last_time = local_clock();
	stats->last_index = freq_table_get_index(stats, policy->cur);

	policy->stats = stats;
	if (!sysfs_create_group(&policy->kobj, &stats_attr_group))
		return;

	/* We failed, release resources */
	policy->stats = NULL;
	kfree(stats->time_in_state);
free_stat:
	kfree(stats);
}
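
/*
 * Record a transition of @policy to @new_freq: close the residency interval
 * of the previous frequency and bump the corresponding transition counter.
 * A pending reset request is carried out here first.
 */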
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq)
{
	struct cpufreq_stats *stats = policy->stats;
	int old_index, new_index;

	if (unlikely(!stats))
		return;

	if (unlikely(READ_ONCE(stats->reset_pending)))
		cpufreq_stats_reset_table(stats);

	old_index = stats->last_index;
	new_index = freq_table_get_index(stats, new_freq);

	/* We can't do stats->time_in_state[-1]= .. */
	if (unlikely(old_index == -1 || new_index == -1 || old_index == new_index))
		return;

	cpufreq_stats_update(stats, stats->last_time);

	stats->last_index = new_index;
	stats->trans_table[old_index * stats->max_state + new_index]++;
	stats->total_trans++;
}