#include <api/fs/fs.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"

int test__openat_syscall_event_on_all_cpus ( void )
2012-11-10 01:46:43 +01:00
{
int err = - 1 , fd , cpu ;
struct cpu_map * cpus ;
struct perf_evsel * evsel ;
2015-04-16 16:52:53 +03:00
unsigned int nr_openat_calls = 111 , i ;
2012-11-10 01:46:43 +01:00
cpu_set_t cpu_set ;
2012-12-10 15:11:43 -03:00
struct thread_map * threads = thread_map__new ( - 1 , getpid ( ) , UINT_MAX ) ;
2014-08-14 02:22:45 +00:00
char sbuf [ STRERR_BUFSIZE ] ;
2012-11-10 01:46:43 +01:00
if ( threads = = NULL ) {
pr_debug ( " thread_map__new \n " ) ;
return - 1 ;
}
cpus = cpu_map__new ( NULL ) ;
if ( cpus = = NULL ) {
pr_debug ( " cpu_map__new \n " ) ;
goto out_thread_map_delete ;
}
CPU_ZERO ( & cpu_set ) ;
2015-04-16 16:52:53 +03:00
evsel = perf_evsel__newtp ( " syscalls " , " sys_enter_openat " ) ;
2012-11-10 01:46:43 +01:00
if ( evsel = = NULL ) {
2015-09-02 09:56:43 +02:00
if ( tracefs__configured ( ) )
2015-02-02 14:35:07 -05:00
pr_debug ( " is tracefs mounted on /sys/kernel/tracing? \n " ) ;
2015-09-02 09:56:43 +02:00
else if ( debugfs__configured ( ) )
2015-02-02 14:35:07 -05:00
pr_debug ( " is debugfs mounted on /sys/kernel/debug? \n " ) ;
else
pr_debug ( " Neither tracefs or debugfs is enabled in this kernel \n " ) ;
2012-11-10 01:46:43 +01:00
goto out_thread_map_delete ;
}
if ( perf_evsel__open ( evsel , cpus , threads ) < 0 ) {
pr_debug ( " failed to open counter: %s, "
" tweak /proc/sys/kernel/perf_event_paranoid? \n " ,
2014-08-14 02:22:45 +00:00
strerror_r ( errno , sbuf , sizeof ( sbuf ) ) ) ;
2012-11-10 01:46:43 +01:00
goto out_evsel_delete ;
}
for ( cpu = 0 ; cpu < cpus - > nr ; + + cpu ) {
2015-04-16 16:52:53 +03:00
unsigned int ncalls = nr_openat_calls + cpu ;
2012-11-10 01:46:43 +01:00
/*
* XXX eventually lift this restriction in a way that
* keeps perf building on older glibc installations
* without CPU_ALLOC . 1024 cpus in 2010 still seems
* a reasonable upper limit tho : - )
*/
if ( cpus - > map [ cpu ] > = CPU_SETSIZE ) {
pr_debug ( " Ignoring CPU %d \n " , cpus - > map [ cpu ] ) ;
continue ;
}
CPU_SET ( cpus - > map [ cpu ] , & cpu_set ) ;
if ( sched_setaffinity ( 0 , sizeof ( cpu_set ) , & cpu_set ) < 0 ) {
pr_debug ( " sched_setaffinity() failed on CPU %d: %s " ,
cpus - > map [ cpu ] ,
2014-08-14 02:22:45 +00:00
strerror_r ( errno , sbuf , sizeof ( sbuf ) ) ) ;
2012-11-10 01:46:43 +01:00
goto out_close_fd ;
}
for ( i = 0 ; i < ncalls ; + + i ) {
2015-04-16 16:52:53 +03:00
fd = openat ( 0 , " /etc/passwd " , O_RDONLY ) ;
2012-11-10 01:46:43 +01:00
close ( fd ) ;
}
CPU_CLR ( cpus - > map [ cpu ] , & cpu_set ) ;
}
/*
* Here we need to explicitely preallocate the counts , as if
* we use the auto allocation it will allocate just for 1 cpu ,
* as we start by cpu 0.
*/
2015-06-26 11:29:11 +02:00
if ( perf_evsel__alloc_counts ( evsel , cpus - > nr , 1 ) < 0 ) {
2012-11-10 01:46:43 +01:00
pr_debug ( " perf_evsel__alloc_counts(ncpus=%d) \n " , cpus - > nr ) ;
goto out_close_fd ;
}
err = 0 ;
for ( cpu = 0 ; cpu < cpus - > nr ; + + cpu ) {
unsigned int expected ;
if ( cpus - > map [ cpu ] > = CPU_SETSIZE )
continue ;
if ( perf_evsel__read_on_cpu ( evsel , cpu , 0 ) < 0 ) {
pr_debug ( " perf_evsel__read_on_cpu \n " ) ;
err = - 1 ;
break ;
}
2015-04-16 16:52:53 +03:00
expected = nr_openat_calls + cpu ;
2015-06-26 11:29:11 +02:00
if ( perf_counts ( evsel - > counts , cpu , 0 ) - > val ! = expected ) {
2012-11-10 01:46:43 +01:00
pr_debug ( " perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got % " PRIu64 " \n " ,
2015-06-26 11:29:11 +02:00
expected , cpus - > map [ cpu ] , perf_counts ( evsel - > counts , cpu , 0 ) - > val ) ;
2012-11-10 01:46:43 +01:00
err = - 1 ;
}
}
2013-01-25 10:44:44 +09:00
perf_evsel__free_counts ( evsel ) ;
2012-11-10 01:46:43 +01:00
out_close_fd :
perf_evsel__close_fd ( evsel , 1 , threads - > nr ) ;
out_evsel_delete :
perf_evsel__delete ( evsel ) ;
out_thread_map_delete :
2015-06-23 00:36:05 +02:00
thread_map__put ( threads ) ;
2012-11-10 01:46:43 +01:00
return err ;
}