#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "evlist.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
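
/*
 * Insert @new into the time-ordered event list. The walk starts from
 * the most recently queued event and moves forward or backward, since
 * the right position is usually close by.
 */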
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}
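
/*
 * If copy_on_queue is set, events are duplicated on queueing and the
 * duplicated bytes are accounted in cur_alloc_size against
 * max_alloc_size; otherwise the caller's event is referenced directly
 * and freeing is a no-op.
 */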
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
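
/*
 * Get a free ordered_event: try the free-list cache first, then the
 * current 64KB buffer, and finally allocate a fresh buffer while the
 * allocation limit permits.
 */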
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		/* Release the duplicated event so it does not leak. */
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}
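
/* Allocate an ordered_event for @event and queue it at @timestamp. */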
struct ordered_event *
ordered_events__new(struct ordered_events *oe, u64 timestamp,
		    union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}
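
/* Return @event to the free-list cache and release any duplicated data. */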
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
}
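
/*
 * Deliver all queued events with a timestamp up to oe->next_flush,
 * then fix up oe->last if the current last event got flushed.
 */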
static int __ordered_events__flush(struct ordered_events *oe,
				   struct machines *machines,
				   struct perf_evlist *evlist,
				   struct perf_tool *tool)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = machines__deliver_event(machines, evlist, iter->event,
						      &sample, tool, iter->file_offset);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}
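
/*
 * Set the flush limit for @how and flush: FINAL flushes everything,
 * HALF flushes up to the midpoint between the first and last queued
 * timestamps, and ROUND reuses the limit set up by the previous round
 * (next_flush is advanced to max_timestamp after a successful flush).
 */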
int ordered_events__flush(struct ordered_events *oe, struct machines *machines,
			  struct perf_evlist *evlist, struct perf_tool *tool,
			  enum oe_flush how)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF",
	};
	int err;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(oe, machines, evlist, tool);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}
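
/* Initialize the queue lists; the allocation limit defaults to unlimited. */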
void ordered_events__init(struct ordered_events *oe)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
}
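
/* Release every buffer on the to_free list, plus any duplicated events. */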
void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}