#include <poll.h>
# include "evlist.h"
# include "evsel.h"
# include "util.h"
#include <linux/bitops.h>
#include <linux/hash.h>

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL) {
		int i;

		for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
			INIT_HLIST_HEAD(&evlist->heads[i]);
		INIT_LIST_HEAD(&evlist->entries);
	}

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	free(evlist->mmap);
	free(evlist->pollfd);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

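/*
 * Illustrative sketch (not part of the original file): a minimal caller,
 * using only the functions defined here, would create an evlist, fall
 * back to the default cycles event, and tear everything down again.
 * Error handling is abbreviated.
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *
 *	if (perf_evlist__add_default(evlist)) {
 *		perf_evlist__delete(evlist);
 *		return -ENOMEM;
 *	}
 *
 *	... open/mmap the counters, consume events ...
 *
 *	perf_evlist__delete(evlist);	// also frees every evsel via __purge()
 */
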
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist, int ncpus, int nthreads)
{
	int nfds = ncpus * nthreads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

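/*
 * Illustrative sketch (not part of the original file): how the two pollfd
 * helpers above are meant to be combined. The fd would normally come from
 * sys_perf_event_open() on each counter; here it is just a stand-in.
 *
 *	if (perf_evlist__alloc_pollfd(evlist, ncpus, nthreads) < 0)
 *		return -ENOMEM;
 *
 *	perf_evlist__add_pollfd(evlist, fd);	// once per counter fd
 *
 *	poll(evlist->pollfd, evlist->nr_fds, timeout);
 */
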
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

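/*
 * Illustrative sketch (not part of the original file): with more than one
 * counter in the evlist, samples need a PERF_SAMPLE_ID field so they can
 * be routed back to their evsel. A report loop would then demultiplex
 * roughly like this (sample__parse_id() is a hypothetical stand-in for
 * whatever extracts the id from the record):
 *
 *	u64 id = sample__parse_id(event);
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, id);
 *
 *	if (evsel != NULL)
 *		... aggregate the sample into that evsel's counts ...
 */
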
event_t *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[cpu];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	event_t *event = NULL;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	if (old != head) {
		size_t size;

		event = (event_t *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;
	return event;
}
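
/*
 * Illustrative sketch (not part of the original file): a consumer would
 * typically poll, then drain each per-cpu ring until read_on_cpu() returns
 * NULL, i.e. until md->prev has caught up with the kernel's head pointer.
 *
 *	while (!done) {
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *
 *		for (cpu = 0; cpu < ncpus; cpu++) {
 *			event_t *event;
 *
 *			while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL)
 *				... write out or process the event ...
 *		}
 *	}
 */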