// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
 */

#include "builtin.h"

#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
#include <poll.h>
#include <linux/capability.h>
#include <linux/string.h>

#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "util/cap.h"
#include "util/config.h"
#define DEFAULT_TRACER  "function_graph"

/*
 * Per-session state for 'perf ftrace': which tracer to use, what target
 * (pids/cpus) to trace, and the various function filter lists collected
 * from the command line.
 */
struct perf_ftrace {
	struct evlist		*evlist;	/* thread/cpu maps for the target */
	struct target		target;		/* pid / cpu-list / system-wide selection */
	const char		*tracer;	/* tracer name, e.g. "function_graph" */
	struct list_head	filters;	/* -T: trace only these functions */
	struct list_head	notrace;	/* -N: never trace these functions */
	struct list_head	graph_funcs;	/* -G: graph filter functions */
	struct list_head	nograph_funcs;	/* -g: graph notrace functions */
	int			graph_depth;	/* -D: max graph depth (0 = kernel default) */
};

/* One function name on one of the filter lists above. */
struct filter_entry {
	struct list_head	list;
	char			name[];		/* flexible array member holding the name */
};
/* Set from signal context; terminates the relay loop in __cmd_ftrace(). */
static bool done;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}
/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked by setting its exec_error to the function below,
 * ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info __maybe_unused,
						void *ucontext __maybe_unused)
{
	/* workload_exec_errno = info->si_value.sival_int; */
	done = true;
}
2017-02-24 04:12:48 +03:00
/*
 * Write @val to the tracefs file @name, truncating (append == false) or
 * appending (append == true).
 *
 * A newline is appended to the value because without it the kernel can
 * hide possible errors.  Returns 0 on success, -1 on failure (details are
 * logged with pr_debug).
 */
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY;
	char errbuf[512];
	char *val_copy;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	if (append)
		flags |= O_APPEND;
	else
		flags |= O_TRUNC;

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'.  Without this, the
	 * kernel can hide possible errors.  Allocate one byte beyond what
	 * strdup() would, so the copy stays NUL-terminated after the
	 * newline is added (the old strdup-based code left a buffer that
	 * was not a valid C string).
	 */
	val_copy = malloc(size + 2);
	if (!val_copy)
		goto out_close;
	memcpy(val_copy, val, size);
	val_copy[size] = '\n';
	val_copy[size + 1] = '\0';

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}
2017-02-24 04:12:48 +03:00
/* Overwrite the tracefs file @name with @val (truncates previous content). */
static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}
/* Append @val to the tracefs file @name (for multi-value files like set_ftrace_pid). */
static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}
2017-02-24 04:12:50 +03:00
static int reset_tracing_cpu ( void ) ;
2017-06-18 17:23:01 +03:00
static void reset_tracing_filters ( void ) ;
2017-02-24 04:12:50 +03:00
2013-03-07 16:45:20 +04:00
static int reset_tracing_files ( struct perf_ftrace * ftrace __maybe_unused )
{
if ( write_tracing_file ( " tracing_on " , " 0 " ) < 0 )
return - 1 ;
if ( write_tracing_file ( " current_tracer " , " nop " ) < 0 )
return - 1 ;
if ( write_tracing_file ( " set_ftrace_pid " , " " ) < 0 )
return - 1 ;
2017-02-24 04:12:50 +03:00
if ( reset_tracing_cpu ( ) < 0 )
return - 1 ;
2017-06-18 17:23:02 +03:00
if ( write_tracing_file ( " max_graph_depth " , " 0 " ) < 0 )
return - 1 ;
2017-06-18 17:23:01 +03:00
reset_tracing_filters ( ) ;
2013-03-07 16:45:20 +04:00
return 0 ;
}
2017-02-24 04:12:48 +03:00
static int set_tracing_pid ( struct perf_ftrace * ftrace )
{
int i ;
char buf [ 16 ] ;
if ( target__has_cpu ( & ftrace - > target ) )
return 0 ;
2019-08-22 14:11:41 +03:00
for ( i = 0 ; i < perf_thread_map__nr ( ftrace - > evlist - > core . threads ) ; i + + ) {
2017-02-24 04:12:48 +03:00
scnprintf ( buf , sizeof ( buf ) , " %d " ,
2019-07-21 14:24:42 +03:00
ftrace - > evlist - > core . threads - > map [ i ] ) ;
2017-02-24 04:12:48 +03:00
if ( append_tracing_file ( " set_ftrace_pid " , buf ) < 0 )
return - 1 ;
}
return 0 ;
}
2019-07-21 14:23:49 +03:00
static int set_tracing_cpumask ( struct perf_cpu_map * cpumap )
2017-02-24 04:12:50 +03:00
{
char * cpumask ;
size_t mask_size ;
int ret ;
int last_cpu ;
last_cpu = cpu_map__cpu ( cpumap , cpumap - > nr - 1 ) ;
2019-08-02 11:29:51 +03:00
mask_size = last_cpu / 4 + 2 ; /* one more byte for EOS */
2017-02-24 04:12:50 +03:00
mask_size + = last_cpu / 32 ; /* ',' is needed for every 32th cpus */
cpumask = malloc ( mask_size ) ;
if ( cpumask = = NULL ) {
pr_debug ( " failed to allocate cpu mask \n " ) ;
return - 1 ;
}
cpu_map__snprint_mask ( cpumap , cpumask , mask_size ) ;
ret = write_tracing_file ( " tracing_cpumask " , cpumask ) ;
free ( cpumask ) ;
return ret ;
}
static int set_tracing_cpu ( struct perf_ftrace * ftrace )
{
2019-07-21 14:24:41 +03:00
struct perf_cpu_map * cpumap = ftrace - > evlist - > core . cpus ;
2017-02-24 04:12:50 +03:00
if ( ! target__has_cpu ( & ftrace - > target ) )
return 0 ;
return set_tracing_cpumask ( cpumap ) ;
}
/* Reset tracing_cpumask so that all online cpus are traced again. */
static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *all_cpus = perf_cpu_map__new(NULL);
	int err = set_tracing_cpumask(all_cpus);

	perf_cpu_map__put(all_cpus);
	return err;
}
2017-06-18 17:23:01 +03:00
/*
 * Append every function name on @funcs to the tracefs file @filter_file.
 * Returns 0 on success, -1 as soon as one append fails.
 */
static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *entry;

	list_for_each_entry(entry, funcs, list) {
		if (append_tracing_file(filter_file, entry->name) < 0)
			return -1;
	}

	return 0;
}
static int set_tracing_filters ( struct perf_ftrace * ftrace )
{
int ret ;
ret = __set_tracing_filter ( " set_ftrace_filter " , & ftrace - > filters ) ;
if ( ret < 0 )
return ret ;
ret = __set_tracing_filter ( " set_ftrace_notrace " , & ftrace - > notrace ) ;
if ( ret < 0 )
return ret ;
ret = __set_tracing_filter ( " set_graph_function " , & ftrace - > graph_funcs ) ;
if ( ret < 0 )
return ret ;
/* old kernels do not have this filter */
__set_tracing_filter ( " set_graph_notrace " , & ftrace - > nograph_funcs ) ;
return ret ;
}
/* Clear all four function-filter files; write errors are ignored. */
static void reset_tracing_filters(void)
{
	static const char * const filter_files[] = {
		"set_ftrace_filter",
		"set_ftrace_notrace",
		"set_graph_function",
		"set_graph_notrace",
	};
	size_t i;

	for (i = 0; i < sizeof(filter_files) / sizeof(filter_files[0]); i++)
		write_tracing_file(filter_files[i], " ");
}
2017-06-18 17:23:02 +03:00
static int set_tracing_depth ( struct perf_ftrace * ftrace )
{
char buf [ 16 ] ;
if ( ftrace - > graph_depth = = 0 )
return 0 ;
if ( ftrace - > graph_depth < 0 ) {
pr_err ( " invalid graph depth: %d \n " , ftrace - > graph_depth ) ;
return - 1 ;
}
snprintf ( buf , sizeof ( buf ) , " %d " , ftrace - > graph_depth ) ;
if ( write_tracing_file ( " max_graph_depth " , buf ) < 0 )
return - 1 ;
return 0 ;
}
2013-03-07 16:45:20 +04:00
/*
 * Run one ftrace session: verify privileges, configure tracefs from
 * @ftrace, optionally fork the workload in @argv, then stream trace_pipe
 * to stdout until the workload exits or the user interrupts.
 *
 * Returns 0 if the session finished via a signal setting 'done',
 * -1 on any setup error.
 */
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	/* tracefs writes require CAP_SYS_ADMIN (or root without libcap) */
	if (!perf_cap__capable(CAP_SYS_ADMIN)) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	/*
	 * SIGCHLD ends the loop when the workload exits; SIGUSR1 is sent by
	 * the evlist machinery if the workload's exec fails (see
	 * ftrace__workload_exec_failed_signal above).
	 */
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	/* fork the workload stopped; it is kicked off after tracing_on below */
	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
				&ftrace->target, argv, false,
				ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		goto out_reset;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		goto out_reset;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		goto out_reset;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		goto out_reset;
	}

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	/* non-blocking reads so poll() drives the relay loop below */
	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	if (write_tracing_file("tracing_on", "1") < 0) {
		pr_err("can't enable tracing\n");
		goto out_close_fd;
	}

	perf_evlist__start_workload(ftrace->evlist);

	/* relay trace_pipe to stdout until a signal sets 'done' */
	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return done ? 0 : -1;
}
2017-01-31 14:38:29 +03:00
static int perf_ftrace_config ( const char * var , const char * value , void * cb )
{
struct perf_ftrace * ftrace = cb ;
2017-07-20 21:27:39 +03:00
if ( ! strstarts ( var , " ftrace. " ) )
2017-01-31 14:38:29 +03:00
return 0 ;
if ( strcmp ( var , " ftrace.tracer " ) )
return - 1 ;
if ( ! strcmp ( value , " function_graph " ) | |
! strcmp ( value , " function " ) ) {
ftrace - > tracer = value ;
return 0 ;
}
pr_err ( " Please select \" function_graph \" (default) or \" function \" \n " ) ;
return - 1 ;
}
2017-06-18 17:23:01 +03:00
/*
 * Option-parsing callback: append the function name @str to the filter
 * list stored in opt->value.  Returns 0 on success, -ENOMEM on failure.
 */
static int parse_filter_func(const struct option *opt, const char *str,
			     int unset __maybe_unused)
{
	struct filter_entry *entry;

	/* +1 for the NUL copied into the flexible array member */
	entry = malloc(sizeof(*entry) + strlen(str) + 1);
	if (entry == NULL)
		return -ENOMEM;

	strcpy(entry->name, str);
	list_add_tail(&entry->list, opt->value);

	return 0;
}
/* Free every filter_entry on @head, leaving the list empty. */
static void delete_filter_func(struct list_head *head)
{
	struct filter_entry *entry, *next;

	list_for_each_entry_safe(entry, next, head, list) {
		list_del_init(&entry->list);
		free(entry);
	}
}
2017-03-27 17:47:20 +03:00
/*
 * Entry point for 'perf ftrace': read perfconfig, parse command-line
 * options, create the thread/cpu maps for the target and run the session.
 */
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "tracer to use: function_graph(default) or function"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "trace on existing process id"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "trace given functions only", parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "do not trace given functions", parse_filter_func),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Set graph filter on given functions", parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
		    "Max depth for function graph tracer"),
	OPT_END()
	};

	/* filter lists must be valid before config/option callbacks run */
	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* need either a workload command or an existing pid/cpu target */
	if (!argc && target__none(&ftrace.target))
		usage_with_options(ftrace_usage, ftrace_options);

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}