#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
/*
 * A struct machine keeps its dsos both on a list and in an rbtree, so
 * initialize both heads here. A zalloc'ed machine gets them zeroed
 * implicitly, but 'perf test hists_link' uses a struct machines on the
 * stack, where a missed rb_root initialization used to segfault.
 */
static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
}
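
/*
 * Set up a single machine: the host when pid == HOST_KERNEL_ID, otherwise
 * a guest, for which a synthetic "[guest/<pid>]" thread is created so that
 * its samples have something to hang off of.
 */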
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->user_dsos);
	dsos__init(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	machine->current_tid = NULL;

	return 0;
}
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
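
/*
 * Typical lifecycle, as a sketch (error handling elided):
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine != NULL) {
 *		... feed it events, e.g. via machine__process_event() ...
 *		machine__delete(machine);
 *	}
 */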
static void dsos__delete(struct dsos *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del(&pos->node);
		dso__delete(pos);
	}
}
void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/*
		 * Advance before erasing: rb_next() on an already erased
		 * node is not safe in general.
		 */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &machine->threads);
		thread__delete(t);
	}
}
void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}
void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}
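
/*
 * Lookup by pid: HOST_KERNEL_ID short-circuits to the host machine,
 * anything else walks the guest rbtree. A guest registered with pid 0
 * doubles as the default machine, returned when no exact match exists.
 */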
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);

	return bf;
}
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}
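
/*
 * Threads are frequently created from events that only carry a tid
 * (pid_ == -1). Once an event arrives that supplies the real pid, adopt
 * it and, for non-leader threads, share the thread leader's map groups.
 */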
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__delete(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th && th->tid == tid) {
		machine__update_thread_pid(machine, th, pid);
		return th;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			/*
			 * Undo the insertion above before freeing, so that
			 * neither the rbtree nor the last_match cache is
			 * left pointing at a deleted thread.
			 */
			rb_erase(&th->rb_node, &machine->threads);
			machine->last_match = NULL;
			thread__delete(th);
			return NULL;
		}
	}

	return th;
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}
struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
	bool compressed;

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (is_kernel_module(filename, &compressed) && compressed)
		dso->symtab_type++;

	map_groups__insert(&machine->kmaps, map);
	return map;
}
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
		     __dsos__fprintf(&machines->host.user_dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
		ret += __dsos__fprintf(&pos->user_dsos.head, fp);
	}

	return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
	       __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}

	return ret;
}
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]", DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};
static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
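
/*
 * /proc/kallsyms lines look like, for illustration:
 *
 *	ffffffff81000000 T _text
 *
 * so resolving "_text" (or "_stext" as a fallback) below gives the start
 * of the kernel text mapping.
 */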
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}
void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}
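
/*
 * With --guestmount, each guest root filesystem is expected under
 * <guestmount>/<pid>/, so a readable <guestmount>/<pid>/proc/kallsyms
 * is what qualifies a directory for guest kernel maps below.
 */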
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}
void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}
int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}
static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
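
/*
 * /proc/version starts with, for example:
 *
 *	Linux version 3.16.4 (gcc version ...) ...
 *
 * so the token following the "Linux version " prefix above is the release
 * string later used to build the /lib/modules/<version> path.
 */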
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.'),
			     dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL)
				continue;

			/* On some systems, modules are compressed like .ko.gz */
			if (is_supported_compression(dot + 1) &&
			    is_kmodule_extension(dot - 2))
				dot -= 3;

			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}

			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}
static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);

	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}
static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end   = (event->mmap.start +
						   event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}
static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
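
/*
 * Kernel-space MMAP events are told apart by filename: names matching
 * the machine__mmap_name() prefix are the kernel proper, '/'-rooted
 * paths are modules on disk, and other '[...]' names are modules
 * without a backing file.
 */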
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;

			/* On some systems, modules are compressed like .ko.gz */
			if (is_supported_compression(dot + 1))
				dot -= 3;
			if (!is_kmodule_extension(dot + 1))
				goto out_problem;

			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header. Prefer an existing kernel dso from that
		 * table over creating one from the map name: when
		 * reporting on a different kernel, only a dso carrying
		 * a build-id lets us find the matching vmlinux image.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
			if (is_kernel_module(dso->long_name, NULL))
				continue;

			kernel = dso;
			break;
		}

		if (kernel == NULL)
			kernel = __dsos__findnew(&machine->kernel_dsos,
						 kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
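
/*
 * The legacy PERF_RECORD_MMAP carries no maj/min/ino/prot/flags, hence
 * the zero placeholders passed to map__new() below.
 */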
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match,
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
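
/*
 * Resolve both sides of a memory access sample: iaddr is the sampled
 * instruction, daddr the data address from PERF_SAMPLE_ADDR.
 */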
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct ip_callchain *chain,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	/*
	 * Honour --max-stack: scanning and merging deeply recorded
	 * callchains is expensive and the trailing entries are rarely
	 * looked at, so only the first max_stack entries are walked.
	 */
	int chain_nr = min(max_stack, (int)chain->nr);
	int i;
	int j;
	int err;
	int skip_idx __maybe_unused;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	for (i = 0; i < chain_nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = 0;
		thread__find_addr_location(thread, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))
				*parent = al.sym;
			else if (have_ignore_callees && root_al &&
				 symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/*
				 * Treat this symbol as the root,
				 * forgetting its callees.
				 */
				*root_al = al;
				callchain_cursor_reset(&callchain_cursor);
			}
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;

	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}
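
/*
 * Resolve the kernel-supplied callchain first; if the sample also
 * carries user registers and stack (PERF_SAMPLE_REGS_USER plus
 * PERF_SAMPLE_STACK_USER), append frames from DWARF post unwinding.
 */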
int thread__resolve_callchain(struct thread *thread,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = thread__resolve_callchain_sample(thread, sample->callchain,
						   parent, root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor,
				   thread, sample, max_stack);
}
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	return rc;
}
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}
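
/*
 * Per-cpu "current tid" bookkeeping: a lazily allocated MAX_NR_CPUS-sized
 * table mapping each cpu to the tid last seen running there, mirrored
 * into the thread's ->cpu field.
 */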
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;

	return 0;
}
int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32.  In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}

	return err;
}