#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
/*
 * Online (Welford) update of the running mean and M2, the sum of squared
 * distances from the mean; min and max are tracked alongside.
 */
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta * (val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}
double avg_stats(struct stats *stats)
{
	return stats->mean;
}
/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev / avg;

	return pct;
}
bool __perf_evsel_stat__is(struct perf_evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->priv;

	return ps->id == id;
}
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
};
#undef ID
void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->priv;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}
void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->priv;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}
int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->priv == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}

void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
}
int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
				      int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = thread_map__nr(evsel->threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}
int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}
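
/*
 * Illustrative lifecycle sketch (not part of the original file): a caller
 * is assumed to allocate the per-evsel stat state once, reset it between
 * measurement runs and free it at the end; error handling beyond the
 * allocation check is elided.
 *
 *	if (perf_evlist__alloc_stats(evlist, false))
 *		return -ENOMEM;
 *	...
 *	perf_evlist__reset_stats(evlist);
 *	...
 *	perf_evlist__free_stats(evlist);
 */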
static void zero_per_pkg(struct perf_evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}
static int check_per_pkg(struct perf_evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct cpu_map *cpus = perf_evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(MAX_NR_CPUS);
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run as a good
	 * instance to mark a package as used (skip = 1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}
static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if (config->aggr_mode == AGGR_NONE)
			perf_stat__update_shadow_stats(evsel, count->values, cpu);
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		if (config->scale) {
			aggr->ena += count->ena;
			aggr->run += count->run;
		}
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}
static int process_counter_maps(struct perf_stat_config *config,
				struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(counter->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct perf_evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;
	init_stats(ps->res_stats);

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, count, 0);

	return 0;
}
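
/*
 * Illustrative sketch (not part of the original file): once an interval's
 * raw values have been read into counter->counts, each evsel is folded
 * into its aggregate and shadow stats roughly like this; "stat_config"
 * names the caller's struct perf_stat_config and is an assumption made
 * only for this example.
 *
 *	evlist__for_each(evlist, counter) {
 *		if (perf_stat_process_counter(&stat_config, counter))
 *			return -1;
 *	}
 */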
int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_session *session)
{
	struct perf_counts_values count;
	struct stat_event *st = &event->stat;
	struct perf_evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = perf_evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}