Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
commit 2b1b858f69
include/asm-generic/vmlinux.lds.h

@@ -61,6 +61,14 @@
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACER
#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

/* .data section */
#define DATA_DATA						\
	*(.data)						\
@@ -81,7 +89,8 @@
	*(__tracepoints)					\
	VMLINUX_SYMBOL(__stop___tracepoints) = .;		\
	LIKELY_PROFILE()					\
	BRANCH_PROFILE()
	BRANCH_PROFILE()					\
	FTRACE_EVENTS()

#define RO_DATA(align)						\
	. = ALIGN((align));					\
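The FTRACE_EVENTS() block reserves a section named _ftrace_events in the image and brackets it with the __start_ftrace_events/__stop_ftrace_events symbols; trace_events.h and trace_events.c (further below) walk that range to find every struct ftrace_event_call that the DEFINE_TRACE_FMT override drops into the section. A rough, self-contained sketch of the same linker-section pattern follows; the demo_* names are invented for illustration and are not part of this commit, and it relies on the ELF linker providing __start_<section>/__stop_<section> for sections whose names are valid C identifiers:

#include <stdio.h>

struct demo_event {
	const char *name;
};

extern struct demo_event __start_demo_events[];
extern struct demo_event __stop_demo_events[];

/* Drop one entry into the "demo_events" section, much as event_##call
 * lands in "_ftrace_events" in the kernel code below. */
#define DEFINE_DEMO_EVENT(n)						\
	static struct demo_event __attribute__((used,			\
		aligned(__alignof__(struct demo_event)),		\
		section("demo_events"))) demo_event_##n = { .name = #n }

DEFINE_DEMO_EVENT(sched_wakeup);
DEFINE_DEMO_EVENT(sched_switch);

int main(void)
{
	struct demo_event *ev;

	/* Walk the array the linker assembled from all entries. */
	for (ev = __start_demo_events; ev < __stop_demo_events; ev++)
		printf("%s\n", ev->name);
	return 0;
}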
include/linux/tracepoint.h

@@ -153,4 +153,7 @@ static inline void tracepoint_synchronize_unregister(void)
	synchronize_sched();
}

#define DEFINE_TRACE_FMT(name, proto, args, fmt)	\
	DECLARE_TRACE(name, TPPROTO(proto), TPARGS(args))

#endif
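With only this default definition in place, the format string is simply discarded and each DEFINE_TRACE_FMT entry behaves like a plain tracepoint declaration; it is trace_events.h (further down) that redefines the macro to also generate a probe and registration hooks. Hand-expanded for illustration, the sched_kthread_stop_ret entry from sched_event_types.h reduces under this default to:

/* DEFINE_TRACE_FMT(sched_kthread_stop_ret,
 *		    TPPROTO(int ret), TPARGS(ret), TPFMT("ret=%d", ret))
 * expands, with the default above, to: */
DECLARE_TRACE(sched_kthread_stop_ret, TPPROTO(int ret), TPARGS(ret));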
include/trace/sched.h

@@ -4,53 +4,6 @@
#include <linux/sched.h>
#include <linux/tracepoint.h>

DECLARE_TRACE(sched_kthread_stop,
	TPPROTO(struct task_struct *t),
	TPARGS(t));

DECLARE_TRACE(sched_kthread_stop_ret,
	TPPROTO(int ret),
	TPARGS(ret));

DECLARE_TRACE(sched_wait_task,
	TPPROTO(struct rq *rq, struct task_struct *p),
	TPARGS(rq, p));

DECLARE_TRACE(sched_wakeup,
	TPPROTO(struct rq *rq, struct task_struct *p, int success),
	TPARGS(rq, p, success));

DECLARE_TRACE(sched_wakeup_new,
	TPPROTO(struct rq *rq, struct task_struct *p, int success),
	TPARGS(rq, p, success));

DECLARE_TRACE(sched_switch,
	TPPROTO(struct rq *rq, struct task_struct *prev,
		struct task_struct *next),
	TPARGS(rq, prev, next));

DECLARE_TRACE(sched_migrate_task,
	TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
	TPARGS(p, orig_cpu, dest_cpu));

DECLARE_TRACE(sched_process_free,
	TPPROTO(struct task_struct *p),
	TPARGS(p));

DECLARE_TRACE(sched_process_exit,
	TPPROTO(struct task_struct *p),
	TPARGS(p));

DECLARE_TRACE(sched_process_wait,
	TPPROTO(struct pid *pid),
	TPARGS(pid));

DECLARE_TRACE(sched_process_fork,
	TPPROTO(struct task_struct *parent, struct task_struct *child),
	TPARGS(parent, child));

DECLARE_TRACE(sched_signal_send,
	TPPROTO(int sig, struct task_struct *p),
	TPARGS(sig, p));
#include <trace/sched_event_types.h>

#endif
include/trace/sched_event_types.h (new file, 72 lines)

@@ -0,0 +1,72 @@

/* use <trace/sched.h> instead */
#ifndef DEFINE_TRACE_FMT
# error Do not include this file directly.
# error Unless you know what you are doing.
#endif

DEFINE_TRACE_FMT(sched_kthread_stop,
	TPPROTO(struct task_struct *t),
	TPARGS(t),
	TPFMT("task %s:%d", t->comm, t->pid));

DEFINE_TRACE_FMT(sched_kthread_stop_ret,
	TPPROTO(int ret),
	TPARGS(ret),
	TPFMT("ret=%d", ret));

DEFINE_TRACE_FMT(sched_wait_task,
	TPPROTO(struct rq *rq, struct task_struct *p),
	TPARGS(rq, p),
	TPFMT("task %s:%d", p->comm, p->pid));

DEFINE_TRACE_FMT(sched_wakeup,
	TPPROTO(struct rq *rq, struct task_struct *p, int success),
	TPARGS(rq, p, success),
	TPFMT("task %s:%d %s",
	      p->comm, p->pid, success ? "succeeded" : "failed"));

DEFINE_TRACE_FMT(sched_wakeup_new,
	TPPROTO(struct rq *rq, struct task_struct *p, int success),
	TPARGS(rq, p, success),
	TPFMT("task %s:%d",
	      p->comm, p->pid, success ? "succeeded" : "failed"));

DEFINE_TRACE_FMT(sched_switch,
	TPPROTO(struct rq *rq, struct task_struct *prev,
		struct task_struct *next),
	TPARGS(rq, prev, next),
	TPFMT("task %s:%d ==> %s:%d",
	      prev->comm, prev->pid, next->comm, next->pid));

DEFINE_TRACE_FMT(sched_migrate_task,
	TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
	TPARGS(p, orig_cpu, dest_cpu),
	TPFMT("task %s:%d from: %d to: %d",
	      p->comm, p->pid, orig_cpu, dest_cpu));

DEFINE_TRACE_FMT(sched_process_free,
	TPPROTO(struct task_struct *p),
	TPARGS(p),
	TPFMT("task %s:%d", p->comm, p->pid));

DEFINE_TRACE_FMT(sched_process_exit,
	TPPROTO(struct task_struct *p),
	TPARGS(p),
	TPFMT("task %s:%d", p->comm, p->pid));

DEFINE_TRACE_FMT(sched_process_wait,
	TPPROTO(struct pid *pid),
	TPARGS(pid),
	TPFMT("pid %d", pid));

DEFINE_TRACE_FMT(sched_process_fork,
	TPPROTO(struct task_struct *parent, struct task_struct *child),
	TPARGS(parent, child),
	TPFMT("parent %s:%d child %s:%d",
	      parent->comm, parent->pid, child->comm, child->pid));

DEFINE_TRACE_FMT(sched_signal_send,
	TPPROTO(int sig, struct task_struct *p),
	TPARGS(sig, p),
	TPFMT("sig: %d task %s:%d", sig, p->comm, p->pid));
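Each TPFMT() string above is the printk-style payload that the trace_events.h override (below) prefixes with the event name before handing it to event_trace_printk(). For example, an enabled sched_wakeup event would record a payload of roughly this shape in the trace buffer (the comm and pid values here are invented for illustration):

(sched_wakeup) task bash:1234 succeeded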
kernel/trace/Kconfig

@@ -159,6 +159,15 @@ config CONTEXT_SWITCH_TRACER
	  This tracer gets called from the context switch and records
	  all switching of tasks.

config EVENT_TRACER
	bool "Trace various events in the kernel"
	depends on DEBUG_KERNEL
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel
	  allowing the user to pick and choose which trace point they
	  want to trace.

config BOOT_TRACER
	bool "Trace boot initcalls"
	depends on DEBUG_KERNEL
kernel/trace/Makefile

@@ -38,5 +38,7 @@ obj-$(CONFIG_POWER_TRACER) += trace_power.o
obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
obj-$(CONFIG_EVENT_TRACER) += trace_events.o
obj-$(CONFIG_EVENT_TRACER) += events.o

libftrace-y := ftrace.o
kernel/trace/events.c (new file, 13 lines)

@@ -0,0 +1,13 @@
/*
 * This is the place to register all trace points as events.
 * Include the trace/<type>.h at the top.
 * Include the trace/<type>_event_types.h at the bottom.
 */

/* trace/<type>.h here */
#include <trace/sched.h>

#include "trace_events.h"

/* trace/<type>_event_types.h here */
#include <trace/sched_event_types.h>
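The comment spells out the pattern for wiring additional subsystems into the event tracer: include the tracepoint header before "trace_events.h" and the matching *_event_types.h after it, so that the second inclusion is compiled with the probe-generating DEFINE_TRACE_FMT. A hedged sketch of what a follow-up for another subsystem could look like (the irq file names are hypothetical; this commit only hooks up the sched tracepoints):

/* trace/<type>.h here */
#include <trace/sched.h>
#include <trace/irq.h>			/* hypothetical */

#include "trace_events.h"

/* trace/<type>_event_types.h here */
#include <trace/sched_event_types.h>
#include <trace/irq_event_types.h>	/* hypothetical */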
kernel/trace/trace_events.c (new file, 407 lines)

@@ -0,0 +1,407 @@
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace_events.h"

#define events_for_each(event)						\
	for (event = __start_ftrace_events;				\
	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
	     event++)

void event_trace_printk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	tracing_record_cmdline(current);
	trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
	va_end(ap);
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call = (void *)__start_ftrace_events;

	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		call++;
	}
}

static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call = __start_ftrace_events;

	events_for_each(call) {

		if (!call->name)
			continue;

		if (strcmp(buf, call->name) != 0)
			continue;

		if (set) {
			/* Already set? */
			if (call->enabled)
				return 0;
			call->enabled = 1;
			call->regfunc();
		} else {
			/* Already cleared? */
			if (!call->enabled)
				return 0;
			call->enabled = 0;
			call->unregfunc();
		}
		return 0;
	}
	return -EINVAL;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next = call;

	(*pos)++;

	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	m->private = ++next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next;

	(*pos)++;

 retry:
	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	if (!call->enabled) {
		call++;
		goto retry;
	}

	next = call;
	m->private = ++next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = __start_ftrace_events;
	}
	return ret;
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
		if (!call->enabled)
			break;

		call->enabled = 0;
		call->unregfunc();
		break;
	case 1:
		if (call->enabled)
			break;

		call->enabled = 1;
		call->regfunc();
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
{
	struct dentry *entry;

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	entry = debugfs_create_file("enable", 0644, call->dir, call,
				    &ftrace_enable_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/enable' entry\n", call->name);

	return 0;
}

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	events_for_each(call) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		event_create_dir(call, d_events);
	}

	return 0;
}
fs_initcall(event_trace_init);
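event_trace_init() gives user space three handles in the tracing debugfs directory: available_events (every compiled-in event), set_event (write a name to enable it, a '!'-prefixed name to disable it, per ftrace_event_write() and ftrace_set_clr_event() above), and a per-event events/<name>/enable file. A minimal user-space sketch, assuming debugfs is mounted at /sys/kernel/debug (the mount point is an assumption, not something this commit establishes):

#include <stdio.h>

int main(void)
{
	/* Path assumes a debugfs mount at /sys/kernel/debug. */
	const char *set_event = "/sys/kernel/debug/tracing/set_event";
	FILE *f = fopen(set_event, "w");

	if (!f) {
		perror(set_event);
		return 1;
	}
	/* A bare name enables the event; "!sched_wakeup" would disable it. */
	fputs("sched_wakeup\n", f);
	fclose(f);
	return 0;
}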
kernel/trace/trace_events.h (new file, 55 lines)

@@ -0,0 +1,55 @@
#ifndef _LINUX_KERNEL_TRACE_EVENTS_H
#define _LINUX_KERNEL_TRACE_EVENTS_H

#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include "trace.h"

struct ftrace_event_call {
	char		*name;
	struct dentry	*dir;
	int		enabled;
	int		(*regfunc)(void);
	void		(*unregfunc)(void);
};

#undef TPFMT
#define TPFMT(fmt, args...)	fmt "\n", ##args

#undef DEFINE_TRACE_FMT
#define DEFINE_TRACE_FMT(call, proto, args, fmt)			\
static void ftrace_event_##call(proto)					\
{									\
	event_trace_printk(_RET_IP_, "(" #call ") " fmt);		\
}									\
									\
static int ftrace_reg_event_##call(void)				\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_event_##call);		\
	if (!ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call);				\
	return ret;							\
}									\
									\
static void ftrace_unreg_event_##call(void)				\
{									\
	unregister_trace_##call(ftrace_event_##call);			\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name = #call,							\
	.regfunc = ftrace_reg_event_##call,				\
	.unregfunc = ftrace_unreg_event_##call,				\
}

void event_trace_printk(unsigned long ip, const char *fmt, ...);
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

#endif /* _LINUX_KERNEL_TRACE_EVENTS_H */
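Concretely, once this header redefines TPFMT and DEFINE_TRACE_FMT, the sched_kthread_stop_ret entry in sched_event_types.h generates roughly the following (hand-expanded for illustration; the success/failure test follows the macro above as written):

static void ftrace_event_sched_kthread_stop_ret(int ret)
{
	event_trace_printk(_RET_IP_,
			   "(sched_kthread_stop_ret) " "ret=%d" "\n", ret);
}

static int ftrace_reg_event_sched_kthread_stop_ret(void)
{
	int ret;

	ret = register_trace_sched_kthread_stop_ret(
			ftrace_event_sched_kthread_stop_ret);
	if (!ret)
		pr_info("event trace: Could not activate trace point "
			"probe to " "sched_kthread_stop_ret");
	return ret;
}

static void ftrace_unreg_event_sched_kthread_stop_ret(void)
{
	unregister_trace_sched_kthread_stop_ret(
			ftrace_event_sched_kthread_stop_ret);
}

static struct ftrace_event_call __used
__attribute__((__aligned__(4)))
__attribute__((section("_ftrace_events"))) event_sched_kthread_stop_ret = {
	.name = "sched_kthread_stop_ret",
	.regfunc = ftrace_reg_event_sched_kthread_stop_ret,
	.unregfunc = ftrace_unreg_event_sched_kthread_stop_ret,
};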