tracing: Separate out trace events from global variables
The trace events for ftrace are all defined via global variables, and the arrays of events and event systems are linked to a global list. This prevents having multiple users of the event system, each with its own view of what to enable and what not to.

Adding descriptors to represent the event/file relation, as well as the trace_array descriptor each is associated with, allows more than one set of events to be defined. Once the trace event files carry a link between the trace event and the trace_array they belong to, we can create multiple trace_arrays that record separate events in separate buffers.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
commit ae63b31e4d
parent 613f04a0f5
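To make the new relationship concrete before the diff, here is a minimal, userspace-compilable sketch of the descriptor layout this commit introduces. It is illustrative only, not the kernel's definitions: the struct names mirror the real ones (ftrace_event_call, ftrace_event_file, trace_array), but the fields are trimmed and the name/nr_files members plus the main() wiring are invented for the example.

/* Illustrative sketch only: a simplified, userspace model of the new
 * descriptor relation; not the kernel's actual definitions. */
#include <stdio.h>

struct trace_array;                       /* one per ring buffer / instance */

struct ftrace_event_call {                /* one per event, still global */
	const char *name;
};

struct ftrace_event_file {                /* per (event, trace_array) pair */
	struct ftrace_event_call *event_call;
	struct trace_array *tr;           /* which trace array this file enables */
	unsigned int flags;               /* e.g. "enabled" state lives here now */
};

struct trace_array {
	const char *name;                 /* invented for the example */
	struct ftrace_event_file *files;  /* events tied to this instance */
	int nr_files;                     /* invented for the example */
};

int main(void)
{
	struct ftrace_event_call sched_switch = { "sched_switch" };

	/* Two trace arrays can enable the same event independently,
	 * because the enable flag sits in the per-array event file. */
	struct ftrace_event_file global_file = { &sched_switch, NULL, 1 };
	struct ftrace_event_file inst_file   = { &sched_switch, NULL, 0 };

	struct trace_array global = { "global",    &global_file, 1 };
	struct trace_array inst   = { "instance0", &inst_file,   1 };

	global_file.tr = &global;
	inst_file.tr   = &inst;

	printf("%s enabled in %s: %u\n", sched_switch.name,
	       global_file.tr->name, global_file.flags);
	printf("%s enabled in %s: %u\n", sched_switch.name,
	       inst_file.tr->name, inst_file.flags);
	return 0;
}

The point the sketch captures is that enablement state now lives in the per-array ftrace_event_file rather than in the global ftrace_event_call, which is exactly what the hunks below implement.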
@@ -182,18 +182,20 @@ extern int ftrace_event_reg(struct ftrace_event_call *event,
 			    enum trace_reg type, void *data);
 
 enum {
-	TRACE_EVENT_FL_ENABLED_BIT,
 	TRACE_EVENT_FL_FILTERED_BIT,
-	TRACE_EVENT_FL_RECORDED_CMD_BIT,
 	TRACE_EVENT_FL_CAP_ANY_BIT,
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 };
 
+/*
+ * Event flags:
+ *  FILTERED      - The event has a filter attached
+ *  CAP_ANY       - Any user can enable for perf
+ *  NO_SET_FILTER - Set when filter has error and is to be ignored
+ */
 enum {
-	TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
 	TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
-	TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
 	TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
 	TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
@@ -203,12 +205,44 @@ struct ftrace_event_call {
 	struct list_head list;
 	struct ftrace_event_class *class;
 	char *name;
-	struct dentry *dir;
 	struct trace_event event;
 	const char *print_fmt;
 	struct event_filter *filter;
+	struct list_head *files;
 	void *mod;
 	void *data;
+	int flags; /* static flags of different events */
+
+#ifdef CONFIG_PERF_EVENTS
+	int perf_refcount;
+	struct hlist_head __percpu *perf_events;
+#endif
+};
+
+struct trace_array;
+struct ftrace_subsystem_dir;
+
+enum {
+	FTRACE_EVENT_FL_ENABLED_BIT,
+	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+};
+
+/*
+ * Ftrace event file flags:
+ *  ENABELD      - The event is enabled
+ *  RECORDED_CMD - The comms should be recorded at sched_switch
+ */
+enum {
+	FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
+	FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+};
+
+struct ftrace_event_file {
+	struct list_head list;
+	struct ftrace_event_call *event_call;
+	struct dentry *dir;
+	struct trace_array *tr;
+	struct ftrace_subsystem_dir *system;
 
 	/*
 	 * 32 bit flags:
@@ -223,17 +257,12 @@ struct ftrace_event_call {
 	 *
 	 * Note: Reads of flags do not hold the event_mutex since
 	 * they occur in critical sections. But the way flags
-	 * is currently used, these changes do no affect the code
+	 * is currently used, these changes do not affect the code
 	 * except that when a change is made, it may have a slight
 	 * delay in propagating the changes to other CPUs due to
 	 * caching and such.
 	 */
 	unsigned int flags;
-
-#ifdef CONFIG_PERF_EVENTS
-	int perf_refcount;
-	struct hlist_head __percpu *perf_events;
-#endif
 };
 
 #define __TRACE_EVENT_FLAGS(name, value) \
@@ -518,7 +518,8 @@ static inline notrace int ftrace_get_offsets_##call( \
 static notrace void \
 ftrace_raw_event_##call(void *__data, proto) \
 { \
-	struct ftrace_event_call *event_call = __data; \
+	struct ftrace_event_file *ftrace_file = __data; \
+	struct ftrace_event_call *event_call = ftrace_file->event_call; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ring_buffer_event *event; \
 	struct ftrace_raw_##call *entry; \
@@ -189,6 +189,8 @@ unsigned long long ns2usecs(cycle_t nsec)
  */
 static struct trace_array global_trace;
 
+LIST_HEAD(ftrace_trace_arrays);
+
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
 int filter_current_check_discard(struct ring_buffer *buffer,
@@ -5359,6 +5361,12 @@ __init static int tracer_alloc_buffers(void)
 
 	register_die_notifier(&trace_die_notifier);
 
+	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
+
+	INIT_LIST_HEAD(&global_trace.systems);
+	INIT_LIST_HEAD(&global_trace.events);
+	list_add(&global_trace.list, &ftrace_trace_arrays);
+
 	while (trace_boot_options) {
 		char *option;
 
@@ -158,13 +158,39 @@ struct trace_array_cpu {
  */
 struct trace_array {
 	struct ring_buffer *buffer;
+	struct list_head list;
 	int cpu;
 	int buffer_disabled;
+	unsigned int flags;
 	cycle_t time_start;
+	struct dentry *dir;
+	struct dentry *event_dir;
+	struct list_head systems;
+	struct list_head events;
 	struct task_struct *waiter;
 	struct trace_array_cpu *data[NR_CPUS];
 };
 
+enum {
+	TRACE_ARRAY_FL_GLOBAL = (1 << 0)
+};
+
+extern struct list_head ftrace_trace_arrays;
+
+/*
+ * The global tracer (top) should be the first trace array added,
+ * but we check the flag anyway.
+ */
+static inline struct trace_array *top_trace_array(void)
+{
+	struct trace_array *tr;
+
+	tr = list_entry(ftrace_trace_arrays.prev,
+			typeof(*tr), list);
+	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
+	return tr;
+}
+
 #define FTRACE_CMP_TYPE(var, type) \
 	__builtin_types_compatible_p(typeof(var), type *)
 
@@ -851,12 +877,19 @@ struct event_filter {
 struct event_subsystem {
 	struct list_head list;
 	const char *name;
-	struct dentry *entry;
 	struct event_filter *filter;
-	int nr_events;
 	int ref_count;
 };
 
+struct ftrace_subsystem_dir {
+	struct list_head list;
+	struct event_subsystem *subsystem;
+	struct trace_array *tr;
+	struct dentry *entry;
+	int ref_count;
+	int nr_events;
+};
+
 #define FILTER_PRED_INVALID ((unsigned short)-1)
 #define FILTER_PRED_IS_RIGHT (1 << 15)
 #define FILTER_PRED_FOLD (1 << 15)
@@ -914,7 +947,7 @@ extern void print_event_filter(struct ftrace_event_call *call,
 			       struct trace_seq *s);
 extern int apply_event_filter(struct ftrace_event_call *call,
 			      char *filter_string);
-extern int apply_subsystem_event_filter(struct event_subsystem *system,
+extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 					char *filter_string);
 extern void print_subsystem_event_filter(struct event_subsystem *system,
 					 struct trace_seq *s);
(One file's diff is suppressed here because it is too large.)
@@ -1907,16 +1907,17 @@ out_unlock:
 	return err;
 }
 
-int apply_subsystem_event_filter(struct event_subsystem *system,
+int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 				 char *filter_string)
 {
+	struct event_subsystem *system = dir->subsystem;
 	struct event_filter *filter;
 	int err = 0;
 
 	mutex_lock(&event_mutex);
 
 	/* Make sure the system still has events */
-	if (!system->nr_events) {
+	if (!dir->nr_events) {
 		err = -ENODEV;
 		goto out_unlock;
 	}