16c66bc167
Add a new thread that takes care of histogram creation, to alleviate the main reader thread so it can keep the perf mmaps served in time and reduce the possibility of losing events.

The 'perf top' command now spawns 2 extra threads; the data processing is as follows:

  1) The main thread reads the data from the mmaps and queues it to an ordered events object;

  2) The processing thread takes the data from the ordered events object and creates the initial histogram;

  3) The GUI thread periodically sorts the initial histogram and presents it.

Passing the data between threads 1 and 2 is done by having 2 ordered events queues: one is always being filled by thread 1 while the other is flushed out in thread 2.

Passing the data between threads 2 and 3 stays the same as it was initially between threads 1 and 3.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-hhf4hllgkmle9wl1aly1jli0@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
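To illustrate the two-queue hand-off between threads 1 and 2 described above, here is a minimal sketch. The names (top_queues, reader_queue_event, process_thread) and the locking scheme are hypothetical, not the actual builtin-top.c code; only the ordered_events calls come from the API declared in the header below.

/*
 * Sketch of the two ordered events queues: thread 1 fills "in" while
 * thread 2 swaps the queues and flushes the full one into the histogram.
 */
#include <pthread.h>
#include <stdbool.h>
#include "ordered-events.h"

struct top_queues {
	struct ordered_events	 data[2];	/* the two ordered events queues */
	struct ordered_events	*in;		/* queue currently filled by thread 1 */
	pthread_mutex_t		 mutex;
	pthread_cond_t		 cond;
	bool			 rotate;	/* reader signals data to flush */
};

/* Thread 1: read an event from the mmaps and queue it. */
static void reader_queue_event(struct top_queues *q, union perf_event *event,
			       u64 timestamp)
{
	pthread_mutex_lock(&q->mutex);
	ordered_events__queue(q->in, event, timestamp, 0);
	q->rotate = true;
	pthread_cond_signal(&q->cond);
	pthread_mutex_unlock(&q->mutex);
}

/* Thread 2: swap the queues, then flush the filled one. */
static void *process_thread(void *arg)
{
	struct top_queues *q = arg;

	for (;;) {
		struct ordered_events *out;

		pthread_mutex_lock(&q->mutex);
		while (!q->rotate)
			pthread_cond_wait(&q->cond, &q->mutex);
		out   = q->in;
		q->in = (out == &q->data[0]) ? &q->data[1] : &q->data[0];
		q->rotate = false;
		pthread_mutex_unlock(&q->mutex);

		/* the deliver callback creates the initial histogram entries */
		ordered_events__flush(out, OE_FLUSH__TOP);
	}
	return NULL;
}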
75 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ORDERED_EVENTS_H
#define __ORDERED_EVENTS_H

#include <linux/types.h>

struct perf_sample;

struct ordered_event {
	u64		timestamp;
	u64		file_offset;
	union perf_event *event;
	struct list_head list;
};

enum oe_flush {
	OE_FLUSH__NONE,
	OE_FLUSH__FINAL,
	OE_FLUSH__ROUND,
	OE_FLUSH__HALF,
	OE_FLUSH__TOP,
};

struct ordered_events;

typedef int (*ordered_events__deliver_t)(struct ordered_events *oe,
					 struct ordered_event *event);

struct ordered_events_buffer {
	struct list_head	list;
	struct ordered_event	event[0];
};

struct ordered_events {
	u64				 last_flush;
	u64				 next_flush;
	u64				 max_timestamp;
	u64				 max_alloc_size;
	u64				 cur_alloc_size;
	struct list_head		 events;
	struct list_head		 cache;
	struct list_head		 to_free;
	struct ordered_events_buffer	*buffer;
	struct ordered_event		*last;
	ordered_events__deliver_t	 deliver;
	int				 buffer_idx;
	unsigned int			 nr_events;
	enum oe_flush			 last_flush_type;
	u32				 nr_unordered_events;
	bool				 copy_on_queue;
	void				*data;
};

int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  u64 timestamp, u64 file_offset);
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
			  void *data);
void ordered_events__free(struct ordered_events *oe);
void ordered_events__reinit(struct ordered_events *oe);

static inline
void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size)
{
	oe->max_alloc_size = size;
}

static inline
void ordered_events__set_copy_on_queue(struct ordered_events *oe, bool copy)
{
	oe->copy_on_queue = copy;
}

#endif /* __ORDERED_EVENTS_H */
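For reference, a minimal usage sketch of the API declared above: a deliver callback paired with the init/queue/flush/free lifecycle. The deliver body and the next_event source are placeholders; a real consumer (perf session or perf top) would parse the sample and update its histograms in the callback.

/* Hypothetical consumer of the ordered_events API. */
#include "ordered-events.h"

static int deliver_event(struct ordered_events *oe, struct ordered_event *event)
{
	/* Placeholder: a real callback would parse event->event into a
	 * perf_sample and feed it to the histogram code. */
	return 0;
}

static int drain(union perf_event *(*next_event)(u64 *timestamp, u64 *offset))
{
	struct ordered_events oe;
	union perf_event *event;
	u64 timestamp, offset;
	int err = 0;

	ordered_events__init(&oe, deliver_event, NULL);
	ordered_events__set_copy_on_queue(&oe, true);	/* copy events so they outlive the mmap */

	while ((event = next_event(&timestamp, &offset)) != NULL) {
		err = ordered_events__queue(&oe, event, timestamp, offset);
		if (err)
			break;
	}

	if (!err)
		err = ordered_events__flush(&oe, OE_FLUSH__FINAL);	/* deliver in timestamp order */

	ordered_events__free(&oe);
	return err;
}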