2019-01-27 15:44:29 +03:00
/* SPDX-License-Identifier: GPL-2.0 */
# ifndef __PERF_MAP_GROUPS_H
# define __PERF_MAP_GROUPS_H
# include <linux/refcount.h>
# include <linux/rbtree.h>
# include <stdio.h>
# include <stdbool.h>
# include <linux/types.h>
# include "rwsem.h"
struct ref_reloc_sym ;
struct machine ;
struct map ;
struct thread ;
/*
 * maps: a set of struct map entries kept in an rbtree (address-ordered),
 * protected by a reader/writer semaphore so lookups can run concurrently
 * while insert/remove take exclusive access.
 */
struct maps {
struct rb_root entries ; /* rbtree root; nodes are struct map */
struct rw_semaphore lock ; /* guards 'entries' */
} ;
/* Add @map to @maps. */
void maps__insert ( struct maps * maps , struct map * map ) ;
/* Remove @map from @maps. */
void maps__remove ( struct maps * maps , struct map * map ) ;
/* Look up a map in @maps by @addr; NOTE(review): presumably returns the map whose range covers @addr, or NULL — confirm in map.c. */
struct map * maps__find ( struct maps * maps , u64 addr ) ;
/* First entry of @maps in tree order, or NULL when empty. */
struct map * maps__first ( struct maps * maps ) ;
/* Successor of @map in tree order, or NULL at the end. */
struct map * map__next ( struct map * map ) ;
2019-10-28 17:31:38 +03:00
/*
 * Iterate @map over every entry of @maps, in tree order.
 * Multiple-evaluation macro: @map must be a plain lvalue.
 */
# define maps__for_each_entry(maps, map) \
for ( map = maps__first ( maps ) ; map ; map = map__next ( map ) )
/*
 * Like maps__for_each_entry() but @next caches the successor first, so the
 * current @map may be removed from @maps during iteration.
 * NOTE(review): the initializer calls map__next(map) before testing @map,
 * so this assumes map__next(NULL) is safe / returns NULL — confirm in map.h.
 */
# define maps__for_each_entry_safe(maps, map, next) \
for ( map = maps__first ( maps ) , next = map__next ( map ) ; map ; map = next , next = map__next ( map ) )
2019-01-27 15:44:29 +03:00
/* Search @maps for a symbol named @name; NOTE(review): @mapp, when non-NULL, presumably receives the containing map — confirm at the definition. */
struct symbol * maps__find_symbol_by_name ( struct maps * maps , const char * name , struct map * * mapp ) ;
/*
 * map_groups: the set of memory maps belonging to one owner (cloned per
 * thread via map_groups__clone(), back-linked to a machine), plus a
 * by-name lookup cache and optional libunwind state.  Lifetime is
 * reference-counted: map_groups__get()/map_groups__put().
 */
struct map_groups {
struct maps maps ; /* address-ordered map set, see struct maps above */
struct machine * machine ; /* NOTE(review): presumably the owning machine — confirm against map_groups__init() */
/* NOTE(review): the following lines up to the next field are stray git
 * commit-message text embedded in the header, not C code; kept verbatim. */
perf map_groups: Add a front end cache for map lookups by name
Lets see if it helps:
First look at the probeable lines for the function that does lookups by
name in a map_groups struct:
# perf probe -x ~/bin/perf -L map_groups__find_by_name
<map_groups__find_by_name@/home/acme/git/perf/tools/perf/util/symbol.c:0>
0 struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1 {
2 struct maps *maps = &mg->maps;
struct map *map;
5 down_read(&maps->lock);
7 if (mg->last_search_by_name && strcmp(mg->last_search_by_name->dso->short_name, name) == 0) {
8 map = mg->last_search_by_name;
9 goto out_unlock;
}
12 maps__for_each_entry(maps, map)
13 if (strcmp(map->dso->short_name, name) == 0) {
14 mg->last_search_by_name = map;
15 goto out_unlock;
}
18 map = NULL;
out_unlock:
21 up_read(&maps->lock);
22 return map;
23 }
int dso__load_vmlinux(struct dso *dso, struct map *map,
const char *vmlinux, bool vmlinux_allocated)
#
Now add a probe to the place where we reuse the last search:
# perf probe -x ~/bin/perf map_groups__find_by_name:8
Added new event:
probe_perf:map_groups__find_by_name (on map_groups__find_by_name:8 in /home/acme/bin/perf)
You can now use it in all perf tools, such as:
perf record -e probe_perf:map_groups__find_by_name -aR sleep 1
#
Now lets do a system wide 'perf stat' counting those events:
# perf stat -e probe_perf:*
Leave it running and lets do a 'perf top', then, after a while, stop the
'perf stat':
# perf stat -e probe_perf:*
^C
Performance counter stats for 'system wide':
3,603 probe_perf:map_groups__find_by_name
44.565253139 seconds time elapsed
#
yeah, good to have.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-tcz37g3nxv3tvxw3q90vga3p@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-11-13 22:33:33 +03:00
struct map * last_search_by_name ; /* cache: last map returned by map_groups__find_by_name() (see the commit text above) */
2019-01-27 15:44:29 +03:00
refcount_t refcnt ; /* lifetime; manipulated by map_groups__get()/put() */
2019-08-15 13:01:45 +03:00
# ifdef HAVE_LIBUNWIND_SUPPORT
void * addr_space ; /* NOTE(review): presumably the libunwind address-space handle — confirm in unwind-libunwind*.c */
struct unwind_libunwind_ops * unwind_libunwind_ops ; /* unwind backend callbacks */
# endif
2019-01-27 15:44:29 +03:00
} ;
/* Maximum length of a kernel map name, including the NUL terminator. */
# define KMAP_NAME_LEN 256
/*
 * kmap: extra state attached to kernel maps.
 */
struct kmap {
struct ref_reloc_sym * ref_reloc_sym ; /* NOTE(review): presumably the symbol used to compute kernel relocation — confirm at users */
struct map_groups * kmaps ; /* back-pointer to the map_groups this kernel map lives in */
char name [ KMAP_NAME_LEN ] ; /* map name, e.g. a module name — TODO confirm */
} ;
/* Allocate and initialize a map_groups bound to @machine; NULL on allocation failure — TODO confirm. */
struct map_groups * map_groups__new ( struct machine * machine ) ;
/* Tear down and free @mg. */
void map_groups__delete ( struct map_groups * mg ) ;
/* True when @mg contains no maps. */
bool map_groups__empty ( struct map_groups * mg ) ;
static inline struct map_groups * map_groups__get ( struct map_groups * mg )
{
if ( mg )
refcount_inc ( & mg - > refcnt ) ;
return mg ;
}
/* Drop a reference taken with map_groups__get(); may free @mg when it is the last one — TODO confirm. */
void map_groups__put ( struct map_groups * mg ) ;
/* Initialize an already-allocated @mg and associate it with @machine. */
void map_groups__init ( struct map_groups * mg , struct machine * machine ) ;
/* Release the resources held by @mg without freeing @mg itself — TODO confirm split vs. map_groups__delete(). */
void map_groups__exit ( struct map_groups * mg ) ;
/* Clone @parent's maps into @thread's map_groups; returns 0 on success — TODO confirm. */
int map_groups__clone ( struct thread * thread , struct map_groups * parent ) ;
/* Print @mg's maps to @fp; returns the number of characters written. */
size_t map_groups__fprintf ( struct map_groups * mg , FILE * fp ) ;
/* Add @map to @mg's map set. */
void map_groups__insert ( struct map_groups * mg , struct map * map ) ;
/*
 * Git history note (commit message for the last_search_by_name cache),
 * kept for reference:
 *
 * perf map_groups: Add a front end cache for map lookups by name
 * Let's see if it helps:
 * First look at the probeable lines for the function that does lookups by
 * name in a map_groups struct:
 * # perf probe -x ~/bin/perf -L map_groups__find_by_name
 * <map_groups__find_by_name@/home/acme/git/perf/tools/perf/util/symbol.c:0>
 * 0 struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
 * 1 {
 * 2 struct maps *maps = &mg->maps;
 * struct map *map;
 * 5 down_read(&maps->lock);
 * 7 if (mg->last_search_by_name && strcmp(mg->last_search_by_name->dso->short_name, name) == 0) {
 * 8 map = mg->last_search_by_name;
 * 9 goto out_unlock;
 * }
 * 12 maps__for_each_entry(maps, map)
 * 13 if (strcmp(map->dso->short_name, name) == 0) {
 * 14 mg->last_search_by_name = map;
 * 15 goto out_unlock;
 * }
 * 18 map = NULL;
 * out_unlock:
 * 21 up_read(&maps->lock);
 * 22 return map;
 * 23 }
 * int dso__load_vmlinux(struct dso *dso, struct map *map,
 * const char *vmlinux, bool vmlinux_allocated)
 * #
 * Now add a probe to the place where we reuse the last search:
 * # perf probe -x ~/bin/perf map_groups__find_by_name:8
 * Added new event:
 * probe_perf:map_groups__find_by_name (on map_groups__find_by_name:8 in /home/acme/bin/perf)
 * You can now use it in all perf tools, such as:
 * perf record -e probe_perf:map_groups__find_by_name -aR sleep 1
 * #
 * Now let's do a system wide 'perf stat' counting those events:
 * # perf stat -e probe_perf:*
 * Leave it running and let's do a 'perf top', then, after a while, stop the
 * 'perf stat':
 * # perf stat -e probe_perf:*
 * ^C
 * Performance counter stats for 'system wide':
 * 3,603 probe_perf:map_groups__find_by_name
 * 44.565253139 seconds time elapsed
 * #
 * yeah, good to have.
 * Cc: Adrian Hunter <adrian.hunter@intel.com>
 * Cc: Andi Kleen <ak@linux.intel.com>
 * Cc: Jiri Olsa <jolsa@kernel.org>
 * Cc: Namhyung Kim <namhyung@kernel.org>
 * Link: https://lkml.kernel.org/n/tip-tcz37g3nxv3tvxw3q90vga3p@git.kernel.org
 * Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
 * 2019-11-13 22:33:33 +03:00
 */
/* Remove @map from @mg's map set (inverse of map_groups__insert()). */
void map_groups__remove ( struct map_groups * mg , struct map * map ) ;
2019-01-27 15:44:29 +03:00
/*
 * Address lookup on @mg: thin convenience wrapper that forwards to
 * maps__find() on the embedded map set.
 */
static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
{
	struct maps *maps = &mg->maps;

	return maps__find(maps, addr);
}
2019-10-28 17:55:28 +03:00
/* Iterate @map over every entry of @mg's map set, in tree order. */
# define map_groups__for_each_entry(mg, map) \
for ( map = maps__first ( & mg - > maps ) ; map ; map = map__next ( map ) )
2019-01-27 15:44:29 +03:00
2019-10-28 17:55:28 +03:00
/* Removal-safe variant: @next caches the successor so @map may be removed while iterating.  NOTE(review): assumes map__next(NULL) is safe — confirm in map.h. */
# define map_groups__for_each_entry_safe(mg, map, next) \
for ( map = maps__first ( & mg - > maps ) , next = map__next ( map ) ; map ; map = next , next = map__next ( map ) )
2019-01-27 15:44:29 +03:00
/* Find the symbol at @addr across @mg's maps; NOTE(review): @mapp, when non-NULL, presumably receives the containing map — confirm at the definition. */
struct symbol * map_groups__find_symbol ( struct map_groups * mg , u64 addr , struct map * * mapp ) ;
/* Find a symbol named @name across @mg's maps; same @mapp convention as above. */
struct symbol * map_groups__find_symbol_by_name ( struct map_groups * mg , const char * name , struct map * * mapp ) ;
struct addr_map_symbol ; /* forward declaration; pointer use only in this header */
2019-11-04 16:14:05 +03:00
/* Resolve @ams (address/map/symbol triple) against @mg; returns 0 on success — TODO confirm error convention. */
int map_groups__find_ams ( struct map_groups * mg , struct addr_map_symbol * ams ) ;
2019-01-27 15:44:29 +03:00
/* Adjust existing maps in @mg that overlap the newly added @map, logging to @fp; returns 0 on success — TODO confirm. */
int map_groups__fixup_overlappings ( struct map_groups * mg , struct map * map , FILE * fp ) ;
/* Look up a map by dso short name; caches the hit in mg->last_search_by_name (see the commit text embedded above in this file). */
struct map * map_groups__find_by_name ( struct map_groups * mg , const char * name ) ;
2019-05-08 16:20:07 +03:00
/* Merge @new_map into the kernel map_groups @kmaps; returns 0 on success — TODO confirm semantics vs. insert. */
int map_groups__merge_in ( struct map_groups * kmaps , struct map * new_map ) ;
2019-01-27 15:44:29 +03:00
# endif // __PERF_MAP_GROUPS_H