/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
2009-06-03 01:37:05 +04:00
# include "builtin.h"
2009-04-20 17:00:56 +04:00
2009-05-23 20:28:58 +04:00
# include "perf.h"
2009-06-03 01:37:05 +04:00
2009-05-28 21:55:41 +04:00
# include "util/symbol.h"
2009-06-04 17:19:47 +04:00
# include "util/color.h"
2009-04-27 10:02:14 +04:00
# include "util/util.h"
2009-05-28 21:55:41 +04:00
# include "util/rbtree.h"
2009-05-26 11:17:18 +04:00
# include "util/parse-options.h"
# include "util/parse-events.h"
2009-04-20 17:00:56 +04:00
# include <assert.h>
# include <fcntl.h>
2009-05-26 11:17:18 +04:00
2009-04-20 17:00:56 +04:00
# include <stdio.h>
2009-05-26 11:17:18 +04:00
2009-04-20 17:00:56 +04:00
# include <errno.h>
# include <time.h>
# include <sched.h>
# include <pthread.h>
# include <sys/syscall.h>
# include <sys/ioctl.h>
# include <sys/poll.h>
# include <sys/prctl.h>
# include <sys/wait.h>
# include <sys/uio.h>
# include <sys/mman.h>
# include <linux/unistd.h>
# include <linux/types.h>
2009-06-06 11:58:57 +04:00
/* One perf event file descriptor per (CPU, counter) pair. */
static int		fd[MAX_NR_CPUS][MAX_COUNTERS];

/* Non-zero: profile every CPU rather than one task. */
static int		system_wide	= 0;

/* Default sampling period when none is given per event. */
static int		default_interval = 100000;
2009-04-20 17:00:56 +04:00
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we
print a __u64 value with %Lx.
Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long.
For consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because these definitions
are needed in util/string.h and util/symbol.h.
The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:
* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Added (u64) casts in process_overflow_event() and print_sym_table()
to kill two remaining warnings.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-19 16:21:42 +04:00
/* Hide symbols with fewer than this many snapshot samples. */
static u64		count_filter	= 5;
/* Maximum number of symbol rows shown per refresh. */
static int		print_entries	= 15;

static int		target_pid	= -1;	/* -1: no specific task */
static int		profile_cpu	= -1;	/* -1: no specific CPU */
static int		nr_cpus		= 0;
static unsigned int	realtime_prio	= 0;
static int		group		= 0;
static unsigned int	page_size;
static unsigned int	mmap_pages	= 16;
static int		freq		= 0;
static int		verbose		= 0;

/* Optional symbol annotation filter and the address range it resolves to. */
static char		*sym_filter;
static unsigned long	filter_start;
static unsigned long	filter_end;

static int		delay_secs	= 2;	/* refresh period */
static int		zero;			/* reset counts each refresh */
static int		dump_symtab;

/*
 * Symbols
 */

/* Kernel text address range, filled in by parse_symbols(). */
static u64		min_ip;
static u64		max_ip		= -1ll;
2009-04-20 17:00:56 +04:00
struct sym_entry {
2009-05-28 21:55:41 +04:00
struct rb_node rb_node ;
struct list_head node ;
2009-04-20 17:00:56 +04:00
unsigned long count [ MAX_COUNTERS ] ;
2009-05-30 00:03:07 +04:00
unsigned long snap_count ;
double weight ;
2009-04-20 17:00:56 +04:00
int skip ;
} ;
struct sym_entry * sym_filter_entry ;
2009-06-06 11:58:57 +04:00
struct dso * kernel_dso ;
2009-05-28 21:55:41 +04:00
/*
 * Symbols will be added here in record_ip and will get out
 * after decayed.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
2009-04-20 17:00:56 +04:00
/*
* Ordering weight : count - 1 * count - 2 * . . . / count - n
*/
static double sym_weight ( const struct sym_entry * sym )
{
2009-05-30 00:03:07 +04:00
double weight = sym - > snap_count ;
2009-04-20 17:00:56 +04:00
int counter ;
for ( counter = 1 ; counter < nr_counters - 1 ; counter + + )
weight * = sym - > count [ counter ] ;
weight / = ( sym - > count [ counter ] + 1 ) ;
return weight ;
}
2009-06-05 16:29:10 +04:00
static long		samples;
static long		userspace_samples;

/*
 * ANSI escape sequence: cursor home (ESC[H) followed by erase-display
 * (ESC[2J). The ESC (0x1b) bytes were lost in a previous mangling of
 * this file, which reduced the "clear screen" string to plain text.
 */
static const char	CONSOLE_CLEAR[] = "\033[H\033[2J";
2009-05-30 00:03:07 +04:00
static void __list_insert_active_sym ( struct sym_entry * syme )
2009-05-28 21:55:41 +04:00
{
list_add ( & syme - > node , & active_symbols ) ;
}
2009-05-30 00:03:07 +04:00
static void list_remove_active_sym ( struct sym_entry * syme )
{
pthread_mutex_lock ( & active_symbols_lock ) ;
list_del_init ( & syme - > node ) ;
pthread_mutex_unlock ( & active_symbols_lock ) ;
}
2009-05-28 21:55:41 +04:00
static void rb_insert_active_sym ( struct rb_root * tree , struct sym_entry * se )
{
struct rb_node * * p = & tree - > rb_node ;
struct rb_node * parent = NULL ;
struct sym_entry * iter ;
while ( * p ! = NULL ) {
parent = * p ;
iter = rb_entry ( parent , struct sym_entry , rb_node ) ;
2009-05-30 00:03:07 +04:00
if ( se - > weight > iter - > weight )
2009-05-28 21:55:41 +04:00
p = & ( * p ) - > rb_left ;
else
p = & ( * p ) - > rb_right ;
}
rb_link_node ( & se - > rb_node , parent , p ) ;
rb_insert_color ( & se - > rb_node , tree ) ;
}
2009-04-20 17:00:56 +04:00
static void print_sym_table ( void )
{
2009-06-03 23:48:40 +04:00
int printed = 0 , j ;
2009-04-20 17:00:56 +04:00
int counter ;
2009-06-05 16:29:10 +04:00
float samples_per_sec = samples / delay_secs ;
float ksamples_per_sec = ( samples - userspace_samples ) / delay_secs ;
float sum_ksamples = 0.0 ;
2009-05-28 21:55:41 +04:00
struct sym_entry * syme , * n ;
struct rb_root tmp = RB_ROOT ;
struct rb_node * nd ;
2009-04-20 17:00:56 +04:00
2009-06-05 16:29:10 +04:00
samples = userspace_samples = 0 ;
2009-04-20 17:00:56 +04:00
2009-05-28 21:55:41 +04:00
/* Sort the active symbols */
2009-05-30 00:03:07 +04:00
pthread_mutex_lock ( & active_symbols_lock ) ;
syme = list_entry ( active_symbols . next , struct sym_entry , node ) ;
pthread_mutex_unlock ( & active_symbols_lock ) ;
list_for_each_entry_safe_from ( syme , n , & active_symbols , node ) {
syme - > snap_count = syme - > count [ 0 ] ;
if ( syme - > snap_count ! = 0 ) {
syme - > weight = sym_weight ( syme ) ;
2009-05-28 21:55:41 +04:00
rb_insert_active_sym ( & tmp , syme ) ;
2009-06-05 16:29:10 +04:00
sum_ksamples + = syme - > snap_count ;
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 11:57:56 +04:00
for ( j = 0 ; j < nr_counters ; j + + )
2009-05-28 21:55:41 +04:00
syme - > count [ j ] = zero ? 0 : syme - > count [ j ] * 7 / 8 ;
} else
2009-05-30 00:03:07 +04:00
list_remove_active_sym ( syme ) ;
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 11:57:56 +04:00
}
2009-06-04 22:48:04 +04:00
puts ( CONSOLE_CLEAR ) ;
2009-04-20 17:00:56 +04:00
printf (
" ------------------------------------------------------------------------------ \n " ) ;
2009-06-03 21:17:25 +04:00
printf ( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [ " ,
2009-06-05 16:29:10 +04:00
samples_per_sec ,
100.0 - ( 100.0 * ( ( samples_per_sec - ksamples_per_sec ) / samples_per_sec ) ) ) ;
2009-04-20 17:00:56 +04:00
2009-06-05 15:27:02 +04:00
if ( nr_counters = = 1 ) {
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we
print a __u64 value with %Lx.
Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long.
For consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because these definitions
are needed in util/string.h and util/symbol.h.
The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:
* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Added (u64) casts in process_overflow_event() and print_sym_table()
to kill two remaining warnings.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-19 16:21:42 +04:00
printf ( " %Ld " , ( u64 ) attrs [ 0 ] . sample_period ) ;
2009-06-05 15:27:02 +04:00
if ( freq )
printf ( " Hz " ) ;
else
printf ( " " ) ;
}
2009-04-20 17:00:56 +04:00
for ( counter = 0 ; counter < nr_counters ; counter + + ) {
if ( counter )
printf ( " / " ) ;
printf ( " %s " , event_name ( counter ) ) ;
}
printf ( " ], " ) ;
2009-05-26 11:17:18 +04:00
if ( target_pid ! = - 1 )
printf ( " (target_pid: %d " , target_pid ) ;
2009-04-20 17:00:56 +04:00
else
printf ( " (all " ) ;
if ( profile_cpu ! = - 1 )
printf ( " , cpu: %d) \n " , profile_cpu ) ;
else {
2009-05-26 11:17:18 +04:00
if ( target_pid ! = - 1 )
2009-04-20 17:00:56 +04:00
printf ( " ) \n " ) ;
else
printf ( " , %d CPUs) \n " , nr_cpus ) ;
}
printf ( " ------------------------------------------------------------------------------ \n \n " ) ;
if ( nr_counters = = 1 )
2009-06-05 16:29:10 +04:00
printf ( " samples pcnt " ) ;
2009-04-20 17:00:56 +04:00
else
2009-06-05 16:29:10 +04:00
printf ( " weight samples pcnt " ) ;
2009-04-20 17:00:56 +04:00
printf ( " RIP kernel function \n "
2009-06-05 16:29:10 +04:00
" ______ _______ _____ ________________ _______________ \n \n "
2009-04-20 17:00:56 +04:00
) ;
2009-05-28 21:55:41 +04:00
for ( nd = rb_first ( & tmp ) ; nd ; nd = rb_next ( nd ) ) {
struct sym_entry * syme = rb_entry ( nd , struct sym_entry , rb_node ) ;
struct symbol * sym = ( struct symbol * ) ( syme + 1 ) ;
2009-06-04 17:19:47 +04:00
char * color = PERF_COLOR_NORMAL ;
double pcnt ;
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 11:57:56 +04:00
2009-06-04 10:53:05 +04:00
if ( + + printed > print_entries | | syme - > snap_count < count_filter )
2009-05-30 00:03:07 +04:00
continue ;
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 11:57:56 +04:00
2009-06-05 16:29:10 +04:00
pcnt = 100.0 - ( 100.0 * ( ( sum_ksamples - syme - > snap_count ) /
sum_ksamples ) ) ;
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 11:57:56 +04:00
2009-06-04 17:19:47 +04:00
/*
2009-06-09 01:15:28 +04:00
* We color high - overhead entries in red , mid - overhead
* entries in green - and keep the low overhead places
* normal :
2009-06-04 17:19:47 +04:00
*/
2009-06-09 01:15:28 +04:00
if ( pcnt > = 5.0 ) {
2009-06-04 17:19:47 +04:00
color = PERF_COLOR_RED ;
2009-06-09 01:15:28 +04:00
} else {
if ( pcnt > = 0.5 )
color = PERF_COLOR_GREEN ;
}
2009-06-04 17:19:47 +04:00
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 11:57:56 +04:00
if ( nr_counters = = 1 )
2009-06-05 16:29:10 +04:00
printf ( " %20.2f - " , syme - > weight ) ;
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 11:57:56 +04:00
else
2009-06-05 16:29:10 +04:00
printf ( " %9.1f %10ld - " , syme - > weight , syme - > snap_count ) ;
2009-06-04 17:19:47 +04:00
color_fprintf ( stdout , color , " %4.1f%% " , pcnt ) ;
printf ( " - %016llx : %s \n " , sym - > start , sym - > name ) ;
2009-04-20 17:00:56 +04:00
}
}
/*
 * Display loop: redraw the symbol table every delay_secs, exiting the
 * whole program as soon as stdin becomes readable (any key press) or
 * poll() fails.
 */
static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	int delay_msecs = delay_secs * 1000;

	printf("PerfTop refresh period: %d seconds\n", delay_secs);

	/*
	 * Keep looping while poll() times out (returns 0). The original
	 * condition was "!poll(...) == 1", which parses as
	 * "(!poll(...)) == 1" - it happened to behave the same, but the
	 * operator precedence trap is worth removing.
	 */
	do {
		print_sym_table();
	} while (poll(&stdin_poll, 1, delay_msecs) == 0);

	printf("key pressed - exiting.\n");
	exit(0);

	return NULL;
}
2009-05-28 21:55:41 +04:00
static int symbol_filter ( struct dso * self , struct symbol * sym )
2009-04-20 17:00:56 +04:00
{
2009-05-28 21:55:41 +04:00
static int filter_match ;
struct sym_entry * syme ;
const char * name = sym - > name ;
if ( ! strcmp ( name , " _text " ) | |
! strcmp ( name , " _etext " ) | |
! strcmp ( name , " _sinittext " ) | |
! strncmp ( " init_module " , name , 11 ) | |
! strncmp ( " cleanup_module " , name , 14 ) | |
strstr ( name , " _text_start " ) | |
strstr ( name , " _text_end " ) )
2009-04-20 17:00:56 +04:00
return 1 ;
2009-05-28 21:55:41 +04:00
syme = dso__sym_priv ( self , sym ) ;
2009-06-05 16:29:10 +04:00
/* Tag samples to be skipped. */
2009-05-28 21:55:41 +04:00
if ( ! strcmp ( " default_idle " , name ) | |
! strcmp ( " cpu_idle " , name ) | |
! strcmp ( " enter_idle " , name ) | |
! strcmp ( " exit_idle " , name ) | |
! strcmp ( " mwait_idle " , name ) )
syme - > skip = 1 ;
2009-04-20 17:00:56 +04:00
if ( filter_match = = 1 ) {
2009-05-28 21:55:41 +04:00
filter_end = sym - > start ;
2009-04-20 17:00:56 +04:00
filter_match = - 1 ;
if ( filter_end - filter_start > 10000 ) {
2009-05-28 21:55:41 +04:00
fprintf ( stderr ,
" hm, too large filter symbol <%s> - skipping. \n " ,
2009-04-20 17:00:56 +04:00
sym_filter ) ;
2009-05-28 21:55:41 +04:00
fprintf ( stderr , " symbol filter start: %016lx \n " ,
filter_start ) ;
fprintf ( stderr , " end: %016lx \n " ,
filter_end ) ;
2009-04-20 17:00:56 +04:00
filter_end = filter_start = 0 ;
sym_filter = NULL ;
sleep ( 1 ) ;
}
}
2009-05-28 21:55:41 +04:00
if ( filter_match = = 0 & & sym_filter & & ! strcmp ( name , sym_filter ) ) {
2009-04-20 17:00:56 +04:00
filter_match = 1 ;
2009-05-28 21:55:41 +04:00
filter_start = sym - > start ;
2009-04-20 17:00:56 +04:00
}
2009-05-28 21:55:41 +04:00
2009-04-20 17:00:56 +04:00
return 0 ;
}
2009-05-28 21:55:41 +04:00
static int parse_symbols ( void )
2009-04-20 17:00:56 +04:00
{
2009-05-28 21:55:41 +04:00
struct rb_node * node ;
struct symbol * sym ;
2009-04-20 17:00:56 +04:00
2009-05-28 21:55:41 +04:00
kernel_dso = dso__new ( " [kernel] " , sizeof ( struct sym_entry ) ) ;
if ( kernel_dso = = NULL )
return - 1 ;
2009-04-20 17:00:56 +04:00
2009-06-04 16:13:04 +04:00
if ( dso__load_kernel ( kernel_dso , NULL , symbol_filter , 1 ) ! = 0 )
2009-05-28 21:55:41 +04:00
goto out_delete_dso ;
2009-04-20 17:00:56 +04:00
2009-05-28 21:55:41 +04:00
node = rb_first ( & kernel_dso - > syms ) ;
sym = rb_entry ( node , struct symbol , rb_node ) ;
min_ip = sym - > start ;
2009-04-20 17:00:56 +04:00
2009-05-28 21:55:41 +04:00
node = rb_last ( & kernel_dso - > syms ) ;
sym = rb_entry ( node , struct symbol , rb_node ) ;
2009-05-29 10:23:16 +04:00
max_ip = sym - > end ;
2009-04-20 17:00:56 +04:00
2009-05-28 21:55:41 +04:00
if ( dump_symtab )
2009-05-29 08:46:46 +04:00
dso__fprintf ( kernel_dso , stderr ) ;
2009-04-20 17:00:56 +04:00
2009-05-28 21:55:41 +04:00
return 0 ;
2009-04-20 17:00:56 +04:00
2009-05-28 21:55:41 +04:00
out_delete_dso :
dso__delete ( kernel_dso ) ;
kernel_dso = NULL ;
return - 1 ;
2009-04-20 17:00:56 +04:00
}
# define TRACE_COUNT 3
/*
* Binary search in the histogram table and record the hit :
*/
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we
print a __u64 value with %Lx.
Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long.
For consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because these definitions
are needed in util/string.h and util/symbol.h.
The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:
* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Added (u64) casts in process_overflow_event() and print_sym_table()
to kill two remaining warnings.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-19 16:21:42 +04:00
/*
 * Attribute one sample at address 'ip' to its kernel symbol for the
 * given counter, inserting the symbol into the active list on first
 * hit. Samples that resolve to no symbol, or to a skip-tagged idle
 * routine, are backed out of the global sample count.
 */
static void record_ip(u64 ip, int counter)
{
	struct symbol *sym = dso__find_symbol(kernel_dso, ip);

	if (sym != NULL) {
		struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);

		if (!syme->skip) {
			syme->count[counter]++;
			pthread_mutex_lock(&active_symbols_lock);
			if (list_empty(&syme->node) || !syme->node.next)
				__list_insert_active_sym(syme);
			pthread_mutex_unlock(&active_symbols_lock);
			return;
		}
	}

	/* Unattributable/skipped hit: undo the samples++ done by our caller. */
	samples--;
}
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we
print a __u64 value with %Lx.
Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long.
For consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because these definitions
are needed in util/string.h and util/symbol.h.
The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:
* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Added (u64) casts in process_overflow_event() and print_sym_table()
to kill two remaining warnings.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-19 16:21:42 +04:00
/*
 * Feed one sampled instruction pointer into the profile.
 *
 * Every sample is counted; IPs inside the [min_ip, max_ip] kernel text
 * window are resolved and recorded via record_ip(), everything else is
 * only accounted as a userspace sample.
 */
static void process_event(u64 ip, int counter)
{
	samples++;

	if (ip >= min_ip && ip <= max_ip) {
		record_ip(ip, counter);
		return;
	}

	userspace_samples++;
}
/*
 * Per-counter view of one mmap()ed perf ring buffer.
 */
struct mmap_data {
	int		counter;	/* index into the counter arrays */
	void		*base;		/* start of the mapping (control page first) */
	unsigned int	mask;		/* data size - 1, for wrapping offsets */
	unsigned int	prev;		/* last data_head position we consumed */
};
static unsigned int mmap_read_head ( struct mmap_data * md )
{
struct perf_counter_mmap_page * pc = md - > base ;
int head ;
head = pc - > data_head ;
rmb ( ) ;
return head ;
}
/* Timestamps of the previous and current mmap_read_counter() pass; used
 * to report how far behind we were when the writer overran our tail. */
struct timeval last_read, this_read;
2009-06-07 01:10:43 +04:00
static void mmap_read_counter ( struct mmap_data * md )
2009-04-20 17:00:56 +04:00
{
unsigned int head = mmap_read_head ( md ) ;
unsigned int old = md - > prev ;
unsigned char * data = md - > base + page_size ;
int diff ;
gettimeofday ( & this_read , NULL ) ;
/*
* If we ' re further behind than half the buffer , there ' s a chance
2009-06-05 16:29:10 +04:00
* the writer will bite our tail and mess up the samples under us .
2009-04-20 17:00:56 +04:00
*
* If we somehow ended up ahead of the head , we got messed up .
*
* In either case , truncate and restart at head .
*/
diff = head - old ;
if ( diff > md - > mask / 2 | | diff < 0 ) {
struct timeval iv ;
unsigned long msecs ;
timersub ( & this_read , & last_read , & iv ) ;
msecs = iv . tv_sec * 1000 + iv . tv_usec / 1000 ;
fprintf ( stderr , " WARNING: failed to keep up with mmap data. "
" Last read %lu msecs ago. \n " , msecs ) ;
/*
* head points to a known good entry , start there .
*/
old = head ;
}
last_read = this_read ;
for ( ; old ! = head ; ) {
struct ip_event {
struct perf_event_header header ;
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we
print a __u64 value with %Lx.
Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long.
For consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because these definitions
are needed in util/string.h and util/symbol.h.
The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:
* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Added (u64) casts in process_overflow_event() and print_sym_table()
to kill two remaining warnings.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-19 16:21:42 +04:00
u64 ip ;
u32 pid , target_pid ;
2009-04-20 17:00:56 +04:00
} ;
struct mmap_event {
struct perf_event_header header ;
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we
print a __u64 value with %Lx.
Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long.
For consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because these definitions
are needed in util/string.h and util/symbol.h.
The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:
* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Added (u64) casts in process_overflow_event() and print_sym_table()
to kill two remaining warnings.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-19 16:21:42 +04:00
u32 pid , target_pid ;
u64 start ;
u64 len ;
u64 pgoff ;
2009-04-20 17:00:56 +04:00
char filename [ PATH_MAX ] ;
} ;
typedef union event_union {
struct perf_event_header header ;
struct ip_event ip ;
struct mmap_event mmap ;
} event_t ;
event_t * event = ( event_t * ) & data [ old & md - > mask ] ;
event_t event_copy ;
2009-04-20 17:22:22 +04:00
size_t size = event - > header . size ;
2009-04-20 17:00:56 +04:00
/*
* Event straddles the mmap boundary - - header should always
* be inside due to u64 alignment of output .
*/
if ( ( old & md - > mask ) + size ! = ( ( old + size ) & md - > mask ) ) {
unsigned int offset = old ;
unsigned int len = min ( sizeof ( * event ) , size ) , cpy ;
void * dst = & event_copy ;
do {
cpy = min ( md - > mask + 1 - ( offset & md - > mask ) , len ) ;
memcpy ( dst , & data [ offset & md - > mask ] , cpy ) ;
offset + = cpy ;
dst + = cpy ;
len - = cpy ;
} while ( len ) ;
event = & event_copy ;
}
old + = size ;
if ( event - > header . misc & PERF_EVENT_MISC_OVERFLOW ) {
2009-06-02 19:38:21 +04:00
if ( event - > header . type & PERF_SAMPLE_IP )
2009-04-20 17:00:56 +04:00
process_event ( event - > ip . ip , md - > counter ) ;
}
}
md - > prev = old ;
}
2009-05-24 10:35:49 +04:00
/* poll() set covering every open counter fd (filled by start_counter()). */
static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
/* Ring-buffer bookkeeping, one slot per CPU x counter. */
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
2009-06-07 01:10:43 +04:00
static void mmap_read ( void )
{
int i , counter ;
for ( i = 0 ; i < nr_cpus ; i + + ) {
for ( counter = 0 ; counter < nr_counters ; counter + + )
mmap_read_counter ( & mmap_array [ i ] [ counter ] ) ;
}
}
2009-06-07 19:31:52 +04:00
int nr_poll ;
int group_fd ;
static void start_counter ( int i , int counter )
2009-04-20 17:00:56 +04:00
{
2009-06-06 11:58:57 +04:00
struct perf_counter_attr * attr ;
2009-04-20 17:00:56 +04:00
unsigned int cpu ;
2009-06-07 19:31:52 +04:00
cpu = profile_cpu ;
if ( target_pid = = - 1 & & profile_cpu = = - 1 )
cpu = i ;
attr = attrs + counter ;
attr - > sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID ;
attr - > freq = freq ;
try_again :
fd [ i ] [ counter ] = sys_perf_counter_open ( attr , target_pid , cpu , group_fd , 0 ) ;
if ( fd [ i ] [ counter ] < 0 ) {
int err = errno ;
if ( err = = EPERM )
2009-06-07 19:39:02 +04:00
die ( " No permission - are you root? \n " ) ;
2009-06-07 19:31:52 +04:00
/*
* If it ' s cycles then fall back to hrtimer
* based cpu - clock - tick sw counter , which
* is always available even if no PMU support :
*/
if ( attr - > type = = PERF_TYPE_HARDWARE
2009-06-11 16:06:28 +04:00
& & attr - > config = = PERF_COUNT_HW_CPU_CYCLES ) {
2009-06-07 19:31:52 +04:00
2009-06-07 19:39:02 +04:00
if ( verbose )
warning ( " ... trying to fall back to cpu-clock-ticks \n " ) ;
2009-06-07 19:31:52 +04:00
attr - > type = PERF_TYPE_SOFTWARE ;
2009-06-11 16:06:28 +04:00
attr - > config = PERF_COUNT_SW_CPU_CLOCK ;
2009-06-07 19:31:52 +04:00
goto try_again ;
}
2009-06-07 19:46:24 +04:00
printf ( " \n " ) ;
error ( " perfcounter syscall returned with %d (%s) \n " ,
fd [ i ] [ counter ] , strerror ( err ) ) ;
die ( " No CONFIG_PERF_COUNTERS=y kernel support configured? \n " ) ;
2009-06-07 19:31:52 +04:00
exit ( - 1 ) ;
}
assert ( fd [ i ] [ counter ] > = 0 ) ;
fcntl ( fd [ i ] [ counter ] , F_SETFL , O_NONBLOCK ) ;
/*
* First counter acts as the group leader :
*/
if ( group & & group_fd = = - 1 )
group_fd = fd [ i ] [ counter ] ;
event_array [ nr_poll ] . fd = fd [ i ] [ counter ] ;
event_array [ nr_poll ] . events = POLLIN ;
nr_poll + + ;
mmap_array [ i ] [ counter ] . counter = counter ;
mmap_array [ i ] [ counter ] . prev = 0 ;
mmap_array [ i ] [ counter ] . mask = mmap_pages * page_size - 1 ;
mmap_array [ i ] [ counter ] . base = mmap ( NULL , ( mmap_pages + 1 ) * page_size ,
PROT_READ , MAP_SHARED , fd [ i ] [ counter ] , 0 ) ;
if ( mmap_array [ i ] [ counter ] . base = = MAP_FAILED )
die ( " failed to mmap with %d (%s) \n " , errno , strerror ( errno ) ) ;
}
static int __cmd_top ( void )
{
pthread_t thread ;
int i , counter ;
2009-04-20 17:00:56 +04:00
int ret ;
for ( i = 0 ; i < nr_cpus ; i + + ) {
group_fd = - 1 ;
2009-06-07 19:31:52 +04:00
for ( counter = 0 ; counter < nr_counters ; counter + + )
start_counter ( i , counter ) ;
2009-04-20 17:00:56 +04:00
}
2009-06-07 01:10:43 +04:00
/* Wait for a minimal set of events before starting the snapshot */
poll ( event_array , nr_poll , 100 ) ;
mmap_read ( ) ;
2009-04-20 17:00:56 +04:00
if ( pthread_create ( & thread , NULL , display_thread , NULL ) ) {
printf ( " Could not create display thread. \n " ) ;
exit ( - 1 ) ;
}
if ( realtime_prio ) {
struct sched_param param ;
param . sched_priority = realtime_prio ;
if ( sched_setscheduler ( 0 , SCHED_FIFO , & param ) ) {
printf ( " Could not set realtime priority. \n " ) ;
exit ( - 1 ) ;
}
}
while ( 1 ) {
2009-06-05 16:29:10 +04:00
int hits = samples ;
2009-04-20 17:00:56 +04:00
2009-06-07 01:10:43 +04:00
mmap_read ( ) ;
2009-04-20 17:00:56 +04:00
2009-06-05 16:29:10 +04:00
if ( hits = = samples )
2009-04-20 17:00:56 +04:00
ret = poll ( event_array , nr_poll , 100 ) ;
}
return 0 ;
}
2009-05-26 11:17:18 +04:00
/* Usage string shown by -h and on bad arguments (NULL-terminated). */
static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};
static const struct option options [ ] = {
OPT_CALLBACK ( ' e ' , " event " , NULL , " event " ,
2009-06-06 14:24:17 +04:00
" event selector. use 'perf list' to list available events " ,
parse_events ) ,
2009-05-26 11:17:18 +04:00
OPT_INTEGER ( ' c ' , " count " , & default_interval ,
" event period to sample " ) ,
OPT_INTEGER ( ' p ' , " pid " , & target_pid ,
" profile events on existing pid " ) ,
OPT_BOOLEAN ( ' a ' , " all-cpus " , & system_wide ,
" system-wide collection from all CPUs " ) ,
OPT_INTEGER ( ' C ' , " CPU " , & profile_cpu ,
" CPU to profile on " ) ,
OPT_INTEGER ( ' m ' , " mmap-pages " , & mmap_pages ,
" number of mmap data pages " ) ,
OPT_INTEGER ( ' r ' , " realtime " , & realtime_prio ,
" collect data with this RT SCHED_FIFO priority " ) ,
2009-05-26 17:25:34 +04:00
OPT_INTEGER ( ' d ' , " delay " , & delay_secs ,
2009-05-26 11:17:18 +04:00
" number of seconds to delay between refreshes " ) ,
OPT_BOOLEAN ( ' D ' , " dump-symtab " , & dump_symtab ,
" dump the symbol table used for profiling " ) ,
2009-06-04 10:53:05 +04:00
OPT_INTEGER ( ' f ' , " count-filter " , & count_filter ,
2009-05-26 11:17:18 +04:00
" only display functions with more events than this " ) ,
OPT_BOOLEAN ( ' g ' , " group " , & group ,
" put the counters into a counter group " ) ,
OPT_STRING ( ' s ' , " sym-filter " , & sym_filter , " pattern " ,
" only display symbols matchig this pattern " ) ,
OPT_BOOLEAN ( ' z ' , " zero " , & group ,
" zero history across updates " ) ,
2009-06-04 10:53:05 +04:00
OPT_INTEGER ( ' F ' , " freq " , & freq ,
2009-05-26 11:17:18 +04:00
" profile at this frequency " ) ,
2009-06-04 10:53:05 +04:00
OPT_INTEGER ( ' E ' , " entries " , & print_entries ,
" display this many functions " ) ,
2009-06-07 19:39:02 +04:00
OPT_BOOLEAN ( ' v ' , " verbose " , & verbose ,
" be more verbose (show counter open errors, etc) " ) ,
2009-05-26 11:17:18 +04:00
OPT_END ( )
} ;
int cmd_top ( int argc , const char * * argv , const char * prefix )
{
int counter ;
page_size = sysconf ( _SC_PAGE_SIZE ) ;
argc = parse_options ( argc , argv , options , top_usage , 0 ) ;
if ( argc )
usage_with_options ( top_usage , options ) ;
if ( freq ) {
default_interval = freq ;
freq = 1 ;
}
/* CPU and PID are mutually exclusive */
if ( target_pid ! = - 1 & & profile_cpu ! = - 1 ) {
printf ( " WARNING: PID switch overriding CPU \n " ) ;
sleep ( 1 ) ;
profile_cpu = - 1 ;
}
2009-06-06 11:58:57 +04:00
if ( ! nr_counters )
2009-05-26 11:17:18 +04:00
nr_counters = 1 ;
2009-06-05 21:31:01 +04:00
if ( delay_secs < 1 )
delay_secs = 1 ;
2009-06-06 11:58:57 +04:00
parse_symbols ( ) ;
/*
* Fill in the ones not specifically initialized via - c :
*/
2009-05-26 11:17:18 +04:00
for ( counter = 0 ; counter < nr_counters ; counter + + ) {
2009-06-06 11:58:57 +04:00
if ( attrs [ counter ] . sample_period )
2009-05-26 11:17:18 +04:00
continue ;
2009-06-06 11:58:57 +04:00
attrs [ counter ] . sample_period = default_interval ;
2009-05-26 11:17:18 +04:00
}
nr_cpus = sysconf ( _SC_NPROCESSORS_ONLN ) ;
assert ( nr_cpus < = MAX_NR_CPUS ) ;
assert ( nr_cpus > = 0 ) ;
if ( target_pid ! = - 1 | | profile_cpu ! = - 1 )
nr_cpus = 1 ;
return __cmd_top ( ) ;
}