// SPDX-License-Identifier: GPL-2.0
# include "util/debug.h"
2022-07-29 13:07:56 -07:00
# include "util/evlist.h"
2022-07-29 13:07:55 -07:00
# include "util/machine.h"
# include "util/map.h"
# include "util/symbol.h"
2022-07-29 13:07:56 -07:00
# include "util/target.h"
# include "util/thread_map.h"
2022-07-29 13:07:55 -07:00
# include "util/lock-contention.h"
# include <linux/zalloc.h>
# include <bpf/bpf.h>
# include "bpf_skel/lock_contention.skel.h"
static struct lock_contention_bpf *skel;

/* should be same as bpf_skel/lock_contention.bpf.c */
struct lock_contention_key {
	s32 stack_id;
};

struct lock_contention_data {
	u64 total_time;
	u64 min_time;
	u64 max_time;
	u32 count;
	u32 flags;
};
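
/*
 * Open and load the skeleton, size its maps, and fill the CPU/task
 * filter maps before attaching the BPF programs.
 */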
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}
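
	/* map sizes must be set before the skeleton is loaded */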
	bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);

	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}
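
	/*
	 * Setting has_cpu/has_task makes the BPF program consult the
	 * corresponding filter map before recording an event.
	 */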
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}
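
	/* with no explicit target, filter on the forked workload's pid */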
	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	lock_contention_bpf__attach(skel);
	return 0;
}
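
/*
 * The programs stay attached for the whole session; start/stop just
 * flip the 'enabled' flag in the BPF program's BSS.
 */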
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}
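
/*
 * Drain the BPF maps into lock_stat entries on con->result, resolving
 * each contended address to a kernel symbol name.
 */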
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack;
	s32 prev_key, key;
	struct lock_contention_data data;
	struct lock_stat *st;
	struct machine *machine = con->machine;
	u64 stack_trace[CONTENTION_STACK_DEPTH];

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

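	/* pass the BPF-side lost-event count back to the caller */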
	con->lost = skel->bss->lost;

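	/* walk every entry in the lock_stat map */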
	prev_key = 0;
	while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
		struct map *kmap;
		struct symbol *sym;
		int idx;

		bpf_map_lookup_elem(fd, &key, &data);
		st = zalloc(sizeof(*st));
		if (st == NULL)
			return -1;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		st->flags = data.flags;

		bpf_map_lookup_elem(stack, &key, stack_trace);

		/* skip BPF + lock internal functions */
		idx = CONTENTION_STACK_SKIP;
		while (is_lock_function(machine, stack_trace[idx]) &&
		       idx < CONTENTION_STACK_DEPTH - 1)
			idx++;

		st->addr = stack_trace[idx];
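		/* resolve the caller address to a kernel symbol name */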
		sym = machine__find_kernel_symbol(machine, st->addr, &kmap);
		if (sym) {
			unsigned long offset;
			int ret = 0;

			offset = kmap->map_ip(kmap, st->addr) - sym->start;

			if (offset)
				ret = asprintf(&st->name, "%s+%#lx", sym->name, offset);
			else
				st->name = strdup(sym->name);

			if (ret < 0 || st->name == NULL) {
				free(st);
				return -1;
			}
		} else if (asprintf(&st->name, "%#lx", (unsigned long)st->addr) < 0) {
			free(st);
			return -1;
		}

		hlist_add_head(&st->hash_entry, con->result);
		prev_key = key;
	}

	return 0;
}
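
/* disable the BPF programs and free the skeleton */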
int lock_contention_finish(void)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	return 0;
}