2015-06-03 17:25:59 +03:00
# include <stdio.h>
# include "evsel.h"
# include "stat.h"
# include "color.h"
2016-03-01 21:57:52 +03:00
# include "pmu.h"
2015-06-03 17:25:59 +03:00
/*
 * Shadow-stat "context" bits: one bit per exclude_* attribute of an
 * event, so counts taken with different exclusion settings are kept
 * in separate slots (see evsel_context()).
 */
enum {
	CTX_BIT_USER	= 1 << 0,
	CTX_BIT_KERNEL	= 1 << 1,
	CTX_BIT_HV	= 1 << 2,
	CTX_BIT_HOST	= 1 << 3,
	CTX_BIT_IDLE	= 1 << 4,
	CTX_BIT_MAX	= 1 << 5,
};

/* number of distinct context slots (all bit combinations below CTX_BIT_MAX) */
#define NUM_CTX CTX_BIT_MAX
perf stat: Support metrics in --per-core/socket mode
Enable metrics printing in --per-core / --per-socket mode. We need to
save the shadow metrics in a unique place. Always use the first CPU in
the aggregation. Then use the same CPU to retrieve the shadow value
later.
Example output:
% perf stat --per-core -a ./BC1s
Performance counter stats for 'system wide':
S0-C0 2 2966.020381 task-clock (msec) # 2.004 CPUs utilized (100.00%)
S0-C0 2 49 context-switches # 0.017 K/sec (100.00%)
S0-C0 2 4 cpu-migrations # 0.001 K/sec (100.00%)
S0-C0 2 467 page-faults # 0.157 K/sec
S0-C0 2 4,599,061,773 cycles # 1.551 GHz (100.00%)
S0-C0 2 9,755,886,883 instructions # 2.12 insn per cycle (100.00%)
S0-C0 2 1,906,272,125 branches # 642.704 M/sec (100.00%)
S0-C0 2 81,180,867 branch-misses # 4.26% of all branches
S0-C1 2 2965.995373 task-clock (msec) # 2.003 CPUs utilized (100.00%)
S0-C1 2 62 context-switches # 0.021 K/sec (100.00%)
S0-C1 2 8 cpu-migrations # 0.003 K/sec (100.00%)
S0-C1 2 281 page-faults # 0.095 K/sec
S0-C1 2 6,347,290 cycles # 0.002 GHz (100.00%)
S0-C1 2 4,654,156 instructions # 0.73 insn per cycle (100.00%)
S0-C1 2 947,121 branches # 0.319 M/sec (100.00%)
S0-C1 2 37,322 branch-misses # 3.94% of all branches
1.480409747 seconds time elapsed
v2: Rebase to older patches
v3: Document shadow cpus. Fix aggr_get_id argument. Fix -A shadows (Jiri)
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: http://lkml.kernel.org/r/1456785386-19481-4-git-send-email-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-03-01 01:36:22 +03:00
/*
 * Which CPU's shadow stats a metric is read back from, per aggregation mode:
 *
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_CORE:   Use first CPU of core
 * AGGR_NONE:   Use matching CPU
 * AGGR_THREAD: Not supported?
 */
2015-06-03 17:25:59 +03:00
static struct stats runtime_nsecs_stats [ MAX_NR_CPUS ] ;
static struct stats runtime_cycles_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_stalled_cycles_front_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_stalled_cycles_back_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_branches_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_cacherefs_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_l1_dcache_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_l1_icache_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_ll_cache_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_itlb_cache_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_dtlb_cache_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_cycles_in_tx_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_transaction_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
static struct stats runtime_elision_stats [ NUM_CTX ] [ MAX_NR_CPUS ] ;
2016-03-01 21:57:52 +03:00
static bool have_frontend_stalled ;
2015-06-03 17:25:59 +03:00
struct stats walltime_nsecs_stats ;
2016-03-01 21:57:52 +03:00
/*
 * One-time init: remember whether the "cpu" PMU exposes a
 * stalled-cycles-frontend event, so print code knows whether to emit
 * an (empty) "stalled cycles per insn" metric slot.
 */
void perf_stat__init_shadow_stats(void)
{
	have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
}
2015-06-03 17:25:59 +03:00
static int evsel_context ( struct perf_evsel * evsel )
{
int ctx = 0 ;
if ( evsel - > attr . exclude_kernel )
ctx | = CTX_BIT_KERNEL ;
if ( evsel - > attr . exclude_user )
ctx | = CTX_BIT_USER ;
if ( evsel - > attr . exclude_hv )
ctx | = CTX_BIT_HV ;
if ( evsel - > attr . exclude_host )
ctx | = CTX_BIT_HOST ;
if ( evsel - > attr . exclude_idle )
ctx | = CTX_BIT_IDLE ;
return ctx ;
}
/* Zero all saved shadow counts, e.g. between measurement intervals. */
void perf_stat__reset_shadow_stats(void)
{
	memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
	memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
	memset(runtime_stalled_cycles_front_stats, 0,
	       sizeof(runtime_stalled_cycles_front_stats));
	memset(runtime_stalled_cycles_back_stats, 0,
	       sizeof(runtime_stalled_cycles_back_stats));
	memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
	memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
	memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
	memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
	memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
	memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
	memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
	memset(runtime_cycles_in_tx_stats, 0,
	       sizeof(runtime_cycles_in_tx_stats));
	memset(runtime_transaction_stats, 0,
	       sizeof(runtime_transaction_stats));
	memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}
/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
				    int cpu)
{
	int ctx = evsel_context(counter);

	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
		update_stats(&runtime_nsecs_stats[cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		/*
		 * Fix: L1I counts previously went into runtime_ll_cache_stats
		 * while print_l1_icache_misses() reads runtime_l1_icache_stats,
		 * so the L1-icache ratio was always computed from zero.
		 */
		update_stats(&runtime_l1_icache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
}
/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};
/*
 * Pick a highlight color for a percentage: red above the first
 * threshold, magenta above the second, yellow above the third,
 * normal otherwise. Thresholds depend on the metric type.
 */
static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0,  5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
2016-01-30 20:06:49 +03:00
static void print_stalled_cycles_frontend ( int cpu ,
2016-03-22 19:09:37 +03:00
struct perf_evsel * evsel , double avg ,
2016-01-30 20:06:49 +03:00
struct perf_stat_output_ctx * out )
2015-06-03 17:25:59 +03:00
{
double total , ratio = 0.0 ;
const char * color ;
int ctx = evsel_context ( evsel ) ;
total = avg_stats ( & runtime_cycles_stats [ ctx ] [ cpu ] ) ;
if ( total )
ratio = avg / total * 100.0 ;
color = get_ratio_color ( GRC_STALLED_CYCLES_FE , ratio ) ;
2016-01-30 20:06:49 +03:00
if ( ratio )
out - > print_metric ( out - > ctx , color , " %7.2f%% " , " frontend cycles idle " ,
ratio ) ;
else
out - > print_metric ( out - > ctx , NULL , NULL , " frontend cycles idle " , 0 ) ;
2015-06-03 17:25:59 +03:00
}
2016-01-30 20:06:49 +03:00
static void print_stalled_cycles_backend ( int cpu ,
2016-03-22 19:09:37 +03:00
struct perf_evsel * evsel , double avg ,
2016-01-30 20:06:49 +03:00
struct perf_stat_output_ctx * out )
2015-06-03 17:25:59 +03:00
{
double total , ratio = 0.0 ;
const char * color ;
int ctx = evsel_context ( evsel ) ;
total = avg_stats ( & runtime_cycles_stats [ ctx ] [ cpu ] ) ;
if ( total )
ratio = avg / total * 100.0 ;
color = get_ratio_color ( GRC_STALLED_CYCLES_BE , ratio ) ;
2016-01-30 20:06:49 +03:00
out - > print_metric ( out - > ctx , color , " %6.2f%% " , " backend cycles idle " , ratio ) ;
2015-06-03 17:25:59 +03:00
}
2016-01-30 20:06:49 +03:00
static void print_branch_misses ( int cpu ,
2016-03-22 19:09:37 +03:00
struct perf_evsel * evsel ,
2016-01-30 20:06:49 +03:00
double avg ,
struct perf_stat_output_ctx * out )
2015-06-03 17:25:59 +03:00
{
double total , ratio = 0.0 ;
const char * color ;
int ctx = evsel_context ( evsel ) ;
total = avg_stats ( & runtime_branches_stats [ ctx ] [ cpu ] ) ;
if ( total )
ratio = avg / total * 100.0 ;
color = get_ratio_color ( GRC_CACHE_MISSES , ratio ) ;
2016-01-30 20:06:49 +03:00
out - > print_metric ( out - > ctx , color , " %7.2f%% " , " of all branches " , ratio ) ;
2015-06-03 17:25:59 +03:00
}
2016-01-30 20:06:49 +03:00
static void print_l1_dcache_misses ( int cpu ,
2016-03-22 19:09:37 +03:00
struct perf_evsel * evsel ,
2016-01-30 20:06:49 +03:00
double avg ,
struct perf_stat_output_ctx * out )
2015-06-03 17:25:59 +03:00
{
double total , ratio = 0.0 ;
const char * color ;
int ctx = evsel_context ( evsel ) ;
total = avg_stats ( & runtime_l1_dcache_stats [ ctx ] [ cpu ] ) ;
if ( total )
ratio = avg / total * 100.0 ;
color = get_ratio_color ( GRC_CACHE_MISSES , ratio ) ;
2016-01-30 20:06:49 +03:00
out - > print_metric ( out - > ctx , color , " %7.2f%% " , " of all L1-dcache hits " , ratio ) ;
2015-06-03 17:25:59 +03:00
}
2016-01-30 20:06:49 +03:00
static void print_l1_icache_misses ( int cpu ,
2016-03-22 19:09:37 +03:00
struct perf_evsel * evsel ,
2016-01-30 20:06:49 +03:00
double avg ,
struct perf_stat_output_ctx * out )
2015-06-03 17:25:59 +03:00
{
double total , ratio = 0.0 ;
const char * color ;
int ctx = evsel_context ( evsel ) ;
total = avg_stats ( & runtime_l1_icache_stats [ ctx ] [ cpu ] ) ;
if ( total )
ratio = avg / total * 100.0 ;
color = get_ratio_color ( GRC_CACHE_MISSES , ratio ) ;
2016-01-30 20:06:49 +03:00
out - > print_metric ( out - > ctx , color , " %7.2f%% " , " of all L1-icache hits " , ratio ) ;
2015-06-03 17:25:59 +03:00
}
2016-01-30 20:06:49 +03:00
static void print_dtlb_cache_misses ( int cpu ,
2016-03-22 19:09:37 +03:00
struct perf_evsel * evsel ,
2016-01-30 20:06:49 +03:00
double avg ,
struct perf_stat_output_ctx * out )
2015-06-03 17:25:59 +03:00
{
double total , ratio = 0.0 ;
const char * color ;
int ctx = evsel_context ( evsel ) ;
total = avg_stats ( & runtime_dtlb_cache_stats [ ctx ] [ cpu ] ) ;
if ( total )
ratio = avg / total * 100.0 ;
color = get_ratio_color ( GRC_CACHE_MISSES , ratio ) ;
2016-01-30 20:06:49 +03:00
out - > print_metric ( out - > ctx , color , " %7.2f%% " , " of all dTLB cache hits " , ratio ) ;
2015-06-03 17:25:59 +03:00
}
2016-01-30 20:06:49 +03:00
static void print_itlb_cache_misses ( int cpu ,
2016-03-22 19:09:37 +03:00
struct perf_evsel * evsel ,
2016-01-30 20:06:49 +03:00
double avg ,
struct perf_stat_output_ctx * out )
2015-06-03 17:25:59 +03:00
{
double total , ratio = 0.0 ;
const char * color ;
int ctx = evsel_context ( evsel ) ;
total = avg_stats ( & runtime_itlb_cache_stats [ ctx ] [ cpu ] ) ;
if ( total )
ratio = avg / total * 100.0 ;
color = get_ratio_color ( GRC_CACHE_MISSES , ratio ) ;
2016-01-30 20:06:49 +03:00
out - > print_metric ( out - > ctx , color , " %7.2f%% " , " of all iTLB cache hits " , ratio ) ;
2015-06-03 17:25:59 +03:00
}
2016-01-30 20:06:49 +03:00
static void print_ll_cache_misses ( int cpu ,
2016-03-22 19:09:37 +03:00
struct perf_evsel * evsel ,
2016-01-30 20:06:49 +03:00
double avg ,
struct perf_stat_output_ctx * out )
2015-06-03 17:25:59 +03:00
{
double total , ratio = 0.0 ;
const char * color ;
int ctx = evsel_context ( evsel ) ;
total = avg_stats ( & runtime_ll_cache_stats [ ctx ] [ cpu ] ) ;
if ( total )
ratio = avg / total * 100.0 ;
color = get_ratio_color ( GRC_CACHE_MISSES , ratio ) ;
2016-01-30 20:06:49 +03:00
out - > print_metric ( out - > ctx , color , " %7.2f%% " , " of all LL-cache hits " , ratio ) ;
2015-06-03 17:25:59 +03:00
}
2016-01-30 20:06:49 +03:00
void perf_stat__print_shadow_stats ( struct perf_evsel * evsel ,
double avg , int cpu ,
struct perf_stat_output_ctx * out )
2015-06-03 17:25:59 +03:00
{
2016-01-30 20:06:49 +03:00
void * ctxp = out - > ctx ;
print_metric_t print_metric = out - > print_metric ;
2015-06-03 17:25:59 +03:00
double total , ratio = 0.0 , total2 ;
int ctx = evsel_context ( evsel ) ;
if ( perf_evsel__match ( evsel , HARDWARE , HW_INSTRUCTIONS ) ) {
total = avg_stats ( & runtime_cycles_stats [ ctx ] [ cpu ] ) ;
if ( total ) {
ratio = avg / total ;
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , " %7.2f " ,
" insn per cycle " , ratio ) ;
2015-06-03 17:25:59 +03:00
} else {
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , NULL , " insn per cycle " , 0 ) ;
2015-06-03 17:25:59 +03:00
}
total = avg_stats ( & runtime_stalled_cycles_front_stats [ ctx ] [ cpu ] ) ;
total = max ( total , avg_stats ( & runtime_stalled_cycles_back_stats [ ctx ] [ cpu ] ) ) ;
if ( total & & avg ) {
perf stat: Implement CSV metrics output
Now support CSV output for metrics. With the new output callbacks this
is relatively straight forward by creating new callbacks.
This allows to easily plot metrics from CSV files.
The new line callback needs to know the number of fields to skip them
correctly
Example output before:
% perf stat -x, true
0.200687,,task-clock,200687,100.00
0,,context-switches,200687,100.00
0,,cpu-migrations,200687,100.00
40,,page-faults,200687,100.00
730871,,cycles,203601,100.00
551056,,stalled-cycles-frontend,203601,100.00
<not supported>,,stalled-cycles-backend,0,100.00
385523,,instructions,203601,100.00
78028,,branches,203601,100.00
3946,,branch-misses,203601,100.00
After:
% perf stat -x, true
.502457,,task-clock,502457,100.00,0.485,CPUs utilized
0,,context-switches,502457,100.00,0.000,K/sec
0,,cpu-migrations,502457,100.00,0.000,K/sec
45,,page-faults,502457,100.00,0.090,M/sec
644692,,cycles,509102,100.00,1.283,GHz
423470,,stalled-cycles-frontend,509102,100.00,65.69,frontend cycles idle
<not supported>,,stalled-cycles-backend,0,100.00,,,,
492701,,instructions,509102,100.00,0.76,insn per cycle
,,,,,0.86,stalled cycles per insn
97767,,branches,509102,100.00,194.578,M/sec
4788,,branch-misses,509102,100.00,4.90,of all branches
or easier readable
$ perf stat -x, -o x.csv true
$ column -s, -t x.csv
0.490635 task-clock 490635 100.00 0.489 CPUs utilized
0 context-switches 490635 100.00 0.000 K/sec
0 cpu-migrations 490635 100.00 0.000 K/sec
45 page-faults 490635 100.00 0.092 M/sec
629080 cycles 497698 100.00 1.282 GHz
409498 stalled-cycles-frontend 497698 100.00 65.09 frontend cycles idle
<not supported> stalled-cycles-backend 0 100.00
491424 instructions 497698 100.00 0.78 insn per cycle
0.83 stalled cycles per insn
97278 branches 497698 100.00 198.270 M/sec
4569 branch-misses 497698 100.00 4.70 of all branches
Two new fields are added: metric value and metric name.
v2: Split out function argument changes
v3: Reenable metrics for real.
v4: Fix wrong hunk from refactoring.
v5: Remove extra "noise" printing (Jiri), but add it to the not counted case.
Print empty metrics for not counted.
v6: Avoid outputting metric on empty format.
v7: Print metric at the end
v8: Remove extra run, ena fields
v9: Avoid extra new line for unsupported counters
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Link: http://lkml.kernel.org/r/1456785386-19481-3-git-send-email-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-03-01 01:36:21 +03:00
out - > new_line ( ctxp ) ;
2015-06-03 17:25:59 +03:00
ratio = total / avg ;
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , " %7.2f " ,
" stalled cycles per insn " ,
ratio ) ;
2016-03-01 21:57:52 +03:00
} else if ( have_frontend_stalled ) {
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , NULL ,
" stalled cycles per insn " , 0 ) ;
2015-06-03 17:25:59 +03:00
}
2016-01-30 20:06:49 +03:00
} else if ( perf_evsel__match ( evsel , HARDWARE , HW_BRANCH_MISSES ) ) {
if ( runtime_branches_stats [ ctx ] [ cpu ] . n ! = 0 )
print_branch_misses ( cpu , evsel , avg , out ) ;
else
print_metric ( ctxp , NULL , NULL , " of all branches " , 0 ) ;
2015-06-03 17:25:59 +03:00
} else if (
evsel - > attr . type = = PERF_TYPE_HW_CACHE & &
evsel - > attr . config = = ( PERF_COUNT_HW_CACHE_L1D |
( ( PERF_COUNT_HW_CACHE_OP_READ ) < < 8 ) |
2016-01-30 20:06:49 +03:00
( ( PERF_COUNT_HW_CACHE_RESULT_MISS ) < < 16 ) ) ) {
if ( runtime_l1_dcache_stats [ ctx ] [ cpu ] . n ! = 0 )
print_l1_dcache_misses ( cpu , evsel , avg , out ) ;
else
print_metric ( ctxp , NULL , NULL , " of all L1-dcache hits " , 0 ) ;
2015-06-03 17:25:59 +03:00
} else if (
evsel - > attr . type = = PERF_TYPE_HW_CACHE & &
evsel - > attr . config = = ( PERF_COUNT_HW_CACHE_L1I |
( ( PERF_COUNT_HW_CACHE_OP_READ ) < < 8 ) |
2016-01-30 20:06:49 +03:00
( ( PERF_COUNT_HW_CACHE_RESULT_MISS ) < < 16 ) ) ) {
if ( runtime_l1_icache_stats [ ctx ] [ cpu ] . n ! = 0 )
print_l1_icache_misses ( cpu , evsel , avg , out ) ;
else
print_metric ( ctxp , NULL , NULL , " of all L1-icache hits " , 0 ) ;
2015-06-03 17:25:59 +03:00
} else if (
evsel - > attr . type = = PERF_TYPE_HW_CACHE & &
evsel - > attr . config = = ( PERF_COUNT_HW_CACHE_DTLB |
( ( PERF_COUNT_HW_CACHE_OP_READ ) < < 8 ) |
2016-01-30 20:06:49 +03:00
( ( PERF_COUNT_HW_CACHE_RESULT_MISS ) < < 16 ) ) ) {
if ( runtime_dtlb_cache_stats [ ctx ] [ cpu ] . n ! = 0 )
print_dtlb_cache_misses ( cpu , evsel , avg , out ) ;
else
print_metric ( ctxp , NULL , NULL , " of all dTLB cache hits " , 0 ) ;
2015-06-03 17:25:59 +03:00
} else if (
evsel - > attr . type = = PERF_TYPE_HW_CACHE & &
evsel - > attr . config = = ( PERF_COUNT_HW_CACHE_ITLB |
( ( PERF_COUNT_HW_CACHE_OP_READ ) < < 8 ) |
2016-01-30 20:06:49 +03:00
( ( PERF_COUNT_HW_CACHE_RESULT_MISS ) < < 16 ) ) ) {
if ( runtime_itlb_cache_stats [ ctx ] [ cpu ] . n ! = 0 )
print_itlb_cache_misses ( cpu , evsel , avg , out ) ;
else
print_metric ( ctxp , NULL , NULL , " of all iTLB cache hits " , 0 ) ;
2015-06-03 17:25:59 +03:00
} else if (
evsel - > attr . type = = PERF_TYPE_HW_CACHE & &
evsel - > attr . config = = ( PERF_COUNT_HW_CACHE_LL |
( ( PERF_COUNT_HW_CACHE_OP_READ ) < < 8 ) |
2016-01-30 20:06:49 +03:00
( ( PERF_COUNT_HW_CACHE_RESULT_MISS ) < < 16 ) ) ) {
if ( runtime_ll_cache_stats [ ctx ] [ cpu ] . n ! = 0 )
print_ll_cache_misses ( cpu , evsel , avg , out ) ;
else
print_metric ( ctxp , NULL , NULL , " of all LL-cache hits " , 0 ) ;
} else if ( perf_evsel__match ( evsel , HARDWARE , HW_CACHE_MISSES ) ) {
2015-06-03 17:25:59 +03:00
total = avg_stats ( & runtime_cacherefs_stats [ ctx ] [ cpu ] ) ;
if ( total )
ratio = avg * 100 / total ;
2016-01-30 20:06:49 +03:00
if ( runtime_cacherefs_stats [ ctx ] [ cpu ] . n ! = 0 )
print_metric ( ctxp , NULL , " %8.3f %% " ,
" of all cache refs " , ratio ) ;
else
print_metric ( ctxp , NULL , NULL , " of all cache refs " , 0 ) ;
2015-06-03 17:25:59 +03:00
} else if ( perf_evsel__match ( evsel , HARDWARE , HW_STALLED_CYCLES_FRONTEND ) ) {
2016-01-30 20:06:49 +03:00
print_stalled_cycles_frontend ( cpu , evsel , avg , out ) ;
2015-06-03 17:25:59 +03:00
} else if ( perf_evsel__match ( evsel , HARDWARE , HW_STALLED_CYCLES_BACKEND ) ) {
2016-01-30 20:06:49 +03:00
print_stalled_cycles_backend ( cpu , evsel , avg , out ) ;
2015-06-03 17:25:59 +03:00
} else if ( perf_evsel__match ( evsel , HARDWARE , HW_CPU_CYCLES ) ) {
total = avg_stats ( & runtime_nsecs_stats [ cpu ] ) ;
if ( total ) {
ratio = avg / total ;
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , " %8.3f " , " GHz " , ratio ) ;
2015-06-03 17:25:59 +03:00
} else {
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , NULL , " Ghz " , 0 ) ;
2015-06-03 17:25:59 +03:00
}
} else if ( perf_stat_evsel__is ( evsel , CYCLES_IN_TX ) ) {
total = avg_stats ( & runtime_cycles_stats [ ctx ] [ cpu ] ) ;
if ( total )
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL ,
" %7.2f%% " , " transactional cycles " ,
100.0 * ( avg / total ) ) ;
else
print_metric ( ctxp , NULL , NULL , " transactional cycles " ,
0 ) ;
2015-06-03 17:25:59 +03:00
} else if ( perf_stat_evsel__is ( evsel , CYCLES_IN_TX_CP ) ) {
total = avg_stats ( & runtime_cycles_stats [ ctx ] [ cpu ] ) ;
total2 = avg_stats ( & runtime_cycles_in_tx_stats [ ctx ] [ cpu ] ) ;
if ( total2 < avg )
total2 = avg ;
if ( total )
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , " %7.2f%% " , " aborted cycles " ,
2015-06-03 17:25:59 +03:00
100.0 * ( ( total2 - avg ) / total ) ) ;
2016-01-30 20:06:49 +03:00
else
print_metric ( ctxp , NULL , NULL , " aborted cycles " , 0 ) ;
} else if ( perf_stat_evsel__is ( evsel , TRANSACTION_START ) ) {
2015-06-03 17:25:59 +03:00
total = avg_stats ( & runtime_cycles_in_tx_stats [ ctx ] [ cpu ] ) ;
2015-07-28 02:24:51 +03:00
if ( avg )
2015-06-03 17:25:59 +03:00
ratio = total / avg ;
2016-01-30 20:06:49 +03:00
if ( runtime_cycles_in_tx_stats [ ctx ] [ cpu ] . n ! = 0 )
print_metric ( ctxp , NULL , " %8.0f " ,
" cycles / transaction " , ratio ) ;
else
print_metric ( ctxp , NULL , NULL , " cycles / transaction " ,
0 ) ;
} else if ( perf_stat_evsel__is ( evsel , ELISION_START ) ) {
2015-06-03 17:25:59 +03:00
total = avg_stats ( & runtime_cycles_in_tx_stats [ ctx ] [ cpu ] ) ;
2015-07-28 02:24:51 +03:00
if ( avg )
2015-06-03 17:25:59 +03:00
ratio = total / avg ;
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , " %8.0f " , " cycles / elision " , ratio ) ;
2015-11-03 04:50:20 +03:00
} else if ( perf_evsel__match ( evsel , SOFTWARE , SW_TASK_CLOCK ) ) {
if ( ( ratio = avg_stats ( & walltime_nsecs_stats ) ) ! = 0 )
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , " %8.3f " , " CPUs utilized " ,
avg / ratio ) ;
2015-11-03 04:50:20 +03:00
else
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , NULL , " CPUs utilized " , 0 ) ;
2015-06-03 17:25:59 +03:00
} else if ( runtime_nsecs_stats [ cpu ] . n ! = 0 ) {
char unit = ' M ' ;
2016-01-30 20:06:49 +03:00
char unit_buf [ 10 ] ;
2015-06-03 17:25:59 +03:00
total = avg_stats ( & runtime_nsecs_stats [ cpu ] ) ;
if ( total )
ratio = 1000.0 * avg / total ;
if ( ratio < 0.001 ) {
ratio * = 1000 ;
unit = ' K ' ;
}
2016-01-30 20:06:49 +03:00
snprintf ( unit_buf , sizeof ( unit_buf ) , " %c/sec " , unit ) ;
print_metric ( ctxp , NULL , " %8.3f " , unit_buf , ratio ) ;
2015-06-03 17:25:59 +03:00
} else {
2016-01-30 20:06:49 +03:00
print_metric ( ctxp , NULL , NULL , NULL , 0 ) ;
2015-06-03 17:25:59 +03:00
}
}