// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
 * Copyright (c) 2020 Changbin Du <changbin.du@gmail.com>, significant enhancement.
 */
#include "builtin.h"

#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
#include <poll.h>
#include <linux/capability.h>
#include <linux/string.h>

#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "strfilter.h"
#include "util/cap.h"
#include "util/config.h"
#include "util/units.h"
#include "util/parse-sublevel-options.h"

#define DEFAULT_TRACER  "function_graph"
struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	int			graph_depth;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	unsigned int		initial_delay;
};

struct filter_entry {
	struct list_head	list;
	char			name[];
};
static volatile int workload_exec_errno;
static bool done;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}
/*
 * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked by setting its exec_error to the function below,
 * ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info __maybe_unused,
						void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = true;
}
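
/*
 * Write @val to the tracefs control file @name (relative to the tracing
 * mount), either truncating the file or appending to it.  A trailing
 * newline is added so that the kernel does not hide write errors.
 */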
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY;
	char errbuf[512];
	char *val_copy;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	if (append)
		flags |= O_APPEND;
	else
		flags |= O_TRUNC;

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'. Without this,
	 * the kernel can hide possible errors.
	 */
	val_copy = strdup(val);
	if (!val_copy)
		goto out_close;
	val_copy[size] = '\n';

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}
static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}

static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}
static int read_tracing_file_to_stdout(const char *name)
{
	char buf[4096];
	char *file;
	int fd;
	int ret = -1;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, buf, sizeof(buf)));
		goto out;
	}

	/* read contents to stdout */
	while (true) {
		int n = read(fd, buf, sizeof(buf));
		if (n == 0)
			break;
		else if (n < 0)
			goto out_close;

		if (fwrite(buf, n, 1, stdout) != 1)
			goto out_close;
	}
	ret = 0;

out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}
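
/*
 * Read the tracefs file @name line by line and invoke @cb on each line.
 * Used below to walk available_filter_functions for the -F/--funcs option.
 */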
static int read_tracing_file_by_line(const char *name,
				     void (*cb)(char *str, void *arg),
				     void *cb_arg)
{
	char *line = NULL;
	size_t len = 0;
	char *file;
	FILE *fp;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fp = fopen(file, "r");
	if (fp == NULL) {
		pr_debug("cannot open tracing file: %s\n", name);
		put_tracing_file(file);
		return -1;
	}

	while (getline(&line, &len, fp) != -1) {
		cb(line, cb_arg);
	}

	if (line)
		free(line);

	fclose(fp);
	put_tracing_file(file);
	return 0;
}
static int write_tracing_file_int(const char *name, int value)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%d", value);
	if (write_tracing_file(name, buf) < 0)
		return -1;

	return 0;
}
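
/* Write @val to a single knob under tracefs options/, e.g. options/function-fork. */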
static int write_tracing_option_file(const char *name, const char *val)
{
	char *file;
	int ret;

	if (asprintf(&file, "options/%s", name) < 0)
		return -1;

	ret = __write_tracing_file(file, val, false);
	free(file);
	return ret;
}
static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);

static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
{
	write_tracing_option_file("function-fork", "0");
	write_tracing_option_file("func_stack_trace", "0");
	write_tracing_option_file("sleep-time", "1");
	write_tracing_option_file("funcgraph-irqs", "1");
	write_tracing_option_file("funcgraph-proc", "0");
	write_tracing_option_file("funcgraph-abstime", "0");
	write_tracing_option_file("latency-format", "0");
	write_tracing_option_file("irq-info", "0");
}
static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
	if (write_tracing_file("tracing_on", "0") < 0)
		return -1;

	if (write_tracing_file("current_tracer", "nop") < 0)
		return -1;

	if (write_tracing_file("set_ftrace_pid", " ") < 0)
		return -1;

	if (reset_tracing_cpu() < 0)
		return -1;

	if (write_tracing_file("max_graph_depth", "0") < 0)
		return -1;

	if (write_tracing_file("tracing_thresh", "0") < 0)
		return -1;

	reset_tracing_filters();
	reset_tracing_options(ftrace);
	return 0;
}
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
	int i;
	char buf[16];

	if (target__has_cpu(&ftrace->target))
		return 0;

	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
		scnprintf(buf, sizeof(buf), "%d",
			  perf_thread_map__pid(ftrace->evlist->core.threads, i));
		if (append_tracing_file("set_ftrace_pid", buf) < 0)
			return -1;
	}
	return 0;
}
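
/*
 * Convert the cpu map to a cpumask string and write it to tracing_cpumask.
 * The buffer needs one hex digit per four cpus, a separating ',' for every
 * 32 cpus and a terminating NUL.
 */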
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
	char *cpumask;
	size_t mask_size;
	int ret;
	int last_cpu;

	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
	mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */

	cpumask = malloc(mask_size);
	if (cpumask == NULL) {
		pr_debug("failed to allocate cpu mask\n");
		return -1;
	}

	cpu_map__snprint_mask(cpumap, cpumask, mask_size);

	ret = write_tracing_file("tracing_cpumask", cpumask);

	free(cpumask);
	return ret;
}
static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
	struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;

	if (!target__has_cpu(&ftrace->target))
		return 0;

	return set_tracing_cpumask(cpumap);
}

static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_stack_trace)
		return 0;

	if (write_tracing_option_file("func_stack_trace", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_irq_info)
		return 0;

	if (write_tracing_option_file("irq-info", "1") < 0)
		return -1;

	return 0;
}

static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
	int ret;

	ret = set_tracing_cpumask(cpumap);
	perf_cpu_map__put(cpumap);
	return ret;
}
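
/*
 * Append the collected function filters to the corresponding tracefs files:
 * set_ftrace_filter/set_ftrace_notrace for the function tracer and
 * set_graph_function/set_graph_notrace for the function_graph tracer.
 */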
static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *pos;

	list_for_each_entry(pos, funcs, list) {
		if (append_tracing_file(filter_file, pos->name) < 0)
			return -1;
	}

	return 0;
}

static int set_tracing_filters(struct perf_ftrace *ftrace)
{
	int ret;

	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
	if (ret < 0)
		return ret;

	/* old kernels do not have this filter */
	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);

	return ret;
}

static void reset_tracing_filters(void)
{
	write_tracing_file("set_ftrace_filter", " ");
	write_tracing_file("set_ftrace_notrace", " ");
	write_tracing_file("set_graph_function", " ");
	write_tracing_file("set_graph_notrace", " ");
}

static int set_tracing_depth(struct perf_ftrace *ftrace)
{
	if (ftrace->graph_depth == 0)
		return 0;

	if (ftrace->graph_depth < 0) {
		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
		return -1;
	}

	if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
		return -1;

	return 0;
}
static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->percpu_buffer_size == 0)
		return 0;

	ret = write_tracing_file_int("buffer_size_kb",
				     ftrace->percpu_buffer_size / 1024);
	if (ret < 0)
		return ret;

	return 0;
}

static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
{
	if (!ftrace->inherit)
		return 0;

	if (write_tracing_option_file("function-fork", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_nosleep_time)
		return 0;

	if (write_tracing_option_file("sleep-time", "0") < 0)
		return -1;

	return 0;
}

static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_noirqs)
		return 0;

	if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
		return -1;

	return 0;
}

static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_verbose)
		return 0;

	if (write_tracing_option_file("funcgraph-proc", "1") < 0)
		return -1;

	if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
		return -1;

	if (write_tracing_option_file("latency-format", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_thresh(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->graph_thresh == 0)
		return 0;

	ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
	if (ret < 0)
		return ret;

	return 0;
}
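
/*
 * Apply all requested knobs (pid/cpu filters, tracer options, function
 * filters, graph depth, buffer size, threshold) before tracing is enabled.
 */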
static int set_tracing_options(struct perf_ftrace *ftrace)
{
	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		return -1;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		return -1;
	}

	if (set_tracing_func_stack_trace(ftrace) < 0) {
		pr_err("failed to set tracing option func_stack_trace\n");
		return -1;
	}

	if (set_tracing_func_irqinfo(ftrace) < 0) {
		pr_err("failed to set tracing option irq-info\n");
		return -1;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		return -1;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		return -1;
	}

	if (set_tracing_percpu_buffer_size(ftrace) < 0) {
		pr_err("failed to set tracing per-cpu buffer size\n");
		return -1;
	}

	if (set_tracing_trace_inherit(ftrace) < 0) {
		pr_err("failed to set tracing option function-fork\n");
		return -1;
	}

	if (set_tracing_sleep_time(ftrace) < 0) {
		pr_err("failed to set tracing option sleep-time\n");
		return -1;
	}

	if (set_tracing_funcgraph_irqs(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-irqs\n");
		return -1;
	}

	if (set_tracing_funcgraph_verbose(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
		return -1;
	}

	if (set_tracing_thresh(ftrace) < 0) {
		pr_err("failed to set tracing thresh\n");
		return -1;
	}

	return 0;
}
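
/*
 * Main tracing loop: reset the ftrace state, prepare the workload (if any),
 * apply the requested options, select the tracer, then stream trace_pipe to
 * stdout until the workload exits or the user interrupts with SIGINT.
 */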
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	if (!(perf_cap__capable(CAP_PERFMON) ||
	      perf_cap__capable(CAP_SYS_ADMIN))) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	if (argc && evlist__prepare_workload(ftrace->evlist, &ftrace->target, argv, false,
					     ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_options(ftrace) < 0)
		goto out_reset;

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	/* display column headers */
	read_tracing_file_to_stdout("trace");

	if (!ftrace->initial_delay) {
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	evlist__start_workload(ftrace->evlist);

	if (ftrace->initial_delay) {
		usleep(ftrace->initial_delay * 1000);
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so below error msg appears at the end. */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		goto out_close_fd;
	}

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
	struct perf_ftrace *ftrace = cb;

	if (!strstarts(var, "ftrace."))
		return 0;

	if (strcmp(var, "ftrace.tracer"))
		return -1;

	if (!strcmp(value, "function_graph") ||
	    !strcmp(value, "function")) {
		ftrace->tracer = value;
		return 0;
	}

	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
	return -1;
}
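
/*
 * -F/--funcs: print the functions from available_filter_functions that
 * match the given strfilter expression (all of them by default) and exit.
 */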
static void list_function_cb(char *str, void *arg)
{
	struct strfilter *filter = (struct strfilter *)arg;

	if (strfilter__compare(filter, str))
		printf("%s", str);
}

static int opt_list_avail_functions(const struct option *opt __maybe_unused,
				    const char *str, int unset)
{
	struct strfilter *filter;
	const char *err = NULL;
	int ret;

	if (unset || !str)
		return -1;

	filter = strfilter__new(str, &err);
	if (!filter)
		return err ? -EINVAL : -ENOMEM;

	ret = strfilter__or(filter, str, &err);
	if (ret == -EINVAL) {
		pr_err("Filter parse error at %td.\n", err - str + 1);
		pr_err("Source: \"%s\"\n", str);
		pr_err("         %*c\n", (int)(err - str + 1), '^');
		strfilter__delete(filter);
		return ret;
	}

	ret = read_tracing_file_by_line("available_filter_functions",
					list_function_cb, filter);
	strfilter__delete(filter);
	if (ret < 0)
		return ret;

	exit(0);
}
static int parse_filter_func(const struct option *opt, const char *str,
			     int unset __maybe_unused)
{
	struct list_head *head = opt->value;
	struct filter_entry *entry;

	entry = malloc(sizeof(*entry) + strlen(str) + 1);
	if (entry == NULL)
		return -ENOMEM;

	strcpy(entry->name, str);
	list_add_tail(&entry->list, head);

	return 0;
}

static void delete_filter_func(struct list_head *head)
{
	struct filter_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del_init(&pos->list);
		free(pos);
	}
}
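
/*
 * -m/--buffer-size: parse a size with an optional B/K/M/G suffix into bytes;
 * values below 1KB are rejected since buffer_size_kb takes kilobytes.
 */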
static int parse_buffer_size(const struct option *opt,
			     const char *str, int unset)
{
	unsigned long *s = (unsigned long *)opt->value;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	unsigned long val;

	if (unset) {
		*s = 0;
		return 0;
	}

	val = parse_tag_value(str, tags_size);
	if (val != (unsigned long) -1) {
		if (val < 1024) {
			pr_err("buffer size too small, must be larger than 1KB.");
			return -1;
		}
		*s = val;
		return 0;
	}

	return -1;
}
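
/*
 * --func-opts and --graph-opts take comma separated sub-options which are
 * parsed with perf_parse_sublevel_options() into the corresponding
 * perf_ftrace fields.
 */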
static int parse_func_tracer_opts(const struct option *opt,
				  const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *)opt->value;
	struct sublevel_option func_tracer_opts[] = {
		{ .name = "call-graph",	.value_ptr = &ftrace->func_stack_trace },
		{ .name = "irq-info",	.value_ptr = &ftrace->func_irq_info },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, func_tracer_opts);
	if (ret)
		return ret;

	return 0;
}
static int parse_graph_tracer_opts(const struct option *opt,
				   const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *)opt->value;
	struct sublevel_option graph_tracer_opts[] = {
		{ .name = "nosleep-time",	.value_ptr = &ftrace->graph_nosleep_time },
		{ .name = "noirqs",		.value_ptr = &ftrace->graph_noirqs },
		{ .name = "verbose",		.value_ptr = &ftrace->graph_verbose },
		{ .name = "thresh",		.value_ptr = &ftrace->graph_thresh },
		{ .name = "depth",		.value_ptr = &ftrace->graph_depth },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, graph_tracer_opts);
	if (ret)
		return ret;

	return 0;
}
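
/*
 * Pick the tracer implied by the command line: any -G/-g filter selects
 * function_graph, otherwise any -T/-N filter selects function, otherwise
 * the default or explicitly chosen (-t) tracer is kept.
 */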
static void select_tracer(struct perf_ftrace *ftrace)
{
	bool graph = !list_empty(&ftrace->graph_funcs) ||
		     !list_empty(&ftrace->nograph_funcs);
	bool func = !list_empty(&ftrace->filters) ||
		    !list_empty(&ftrace->notrace);

	/* The function_graph has priority over function tracer. */
	if (graph)
		ftrace->tracer = "function_graph";
	else if (func)
		ftrace->tracer = "function";
	/* Otherwise, the default tracer is used. */

	pr_debug("%s tracer is used\n", ftrace->tracer);
}
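
/*
 * Typical invocations (not exhaustive, see the option table below), e.g.:
 *
 *   perf ftrace -- sleep 1               function_graph trace of the workload
 *   perf ftrace -T 'vfs_*' -- ls         function tracer limited to vfs_*
 *   perf ftrace -a -G schedule sleep 1   system-wide graph trace of schedule
 */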
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "Tracer to use: function_graph(default) or function"),
	OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
			     "Show available functions to filter",
			     opt_list_avail_functions, "*"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "Trace on existing process id"),
	/* TODO: Add short option -t after -t/--tracer can be removed. */
	OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
		   "Trace on existing thread id (exclusive to --pid)"),
	OPT_INCR('v', "verbose", &verbose,
		 "Be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "System-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		   "List of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "Trace given functions using function tracer",
		     parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "Do not trace given functions", parse_filter_func),
	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
		     "Function tracer options, available options: call-graph,irq-info",
		     parse_func_tracer_opts),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Trace given functions using function_graph tracer",
		     parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
		     "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
		     parse_graph_tracer_opts),
	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
		     "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
	OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
		    "Trace children processes"),
	OPT_UINTEGER('D', "delay", &ftrace.initial_delay,
		     "Number of milliseconds to wait before starting tracing after program start"),
	OPT_END()
	};

	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&ftrace.target))
		ftrace.target.system_wide = true;

	select_tracer(&ftrace);

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}