Merge branches 'tracing/ftrace', 'tracing/hw-branch-tracing' and 'tracing/ring-buffer'; commit 'v2.6.28' into tracing/core
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1024,7 +1024,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 	if (ret == -EBUSY) {
 		mutex_unlock(&cgroup_mutex);
 		mutex_unlock(&inode->i_mutex);
-		goto drop_new_super;
+		goto free_cg_links;
 	}

 	/* EBUSY should be the only error here */
@@ -1073,10 +1073,11 @@ static int cgroup_get_sb(struct file_system_type *fs_type,

 	return simple_set_mnt(mnt, sb);

+ free_cg_links:
+	free_cg_links(&tmp_cg_links);
 drop_new_super:
 	up_write(&sb->s_umount);
 	deactivate_super(sb);
-	free_cg_links(&tmp_cg_links);
 	return ret;
 }

@@ -2934,9 +2935,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 again:
 	root = subsys->root;
 	if (root == &rootnode) {
-		printk(KERN_INFO
-		       "Not cloning cgroup for unused subsystem %s\n",
-		       subsys->name);
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
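Note: the EBUSY path above now frees tmp_cg_links through a new label that falls through into drop_new_super:, so teardown stays in one ladder and the links are released before the half-built superblock is deactivated. A minimal, self-contained sketch of that fall-through unwind idiom (all names illustrative, not kernel APIs):

	#include <stdlib.h>

	int setup_three(char **a, char **b, char **c)
	{
		*a = malloc(16);
		if (!*a)
			return -1;
		*b = malloc(16);
		if (!*b)
			goto free_a;	/* unwind everything acquired so far */
		*c = malloc(16);
		if (!*c)
			goto free_b;
		return 0;

	free_b:				/* later resource first ... */
		free(*b);
	free_a:				/* ... then fall through to earlier ones */
		free(*a);
		return -1;
	}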
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1096,6 +1096,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
+	if (unlikely(ptrace_reparented(current)))
+		ptrace_fork(p, clone_flags);

 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer)
 	return 0;
 }

+static int no_timer_create(struct k_itimer *new_timer)
+{
+	return -EOPNOTSUPP;
+}
+
 /*
  * Return nonzero if we know a priori this clockid_t value is bogus.
  */
@@ -248,6 +253,7 @@ static __init int init_posix_timers(void)
 		.clock_getres = hrtimer_get_res,
 		.clock_get = posix_get_monotonic_raw,
 		.clock_set = do_posix_clock_nosettime,
+		.timer_create = no_timer_create,
 	};

 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
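Note: with .timer_create wired to no_timer_create, a timer_create(2) call against CLOCK_MONOTONIC_RAW is now rejected cleanly instead of being accepted for a clock that cannot service timers. A userspace check of that behavior (assumes glibc exposes CLOCK_MONOTONIC_RAW; the fallback define of 4 matches Linux, and the program links with -lrt):

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <time.h>

	#ifndef CLOCK_MONOTONIC_RAW
	#define CLOCK_MONOTONIC_RAW 4
	#endif

	int main(void)
	{
		timer_t timer;

		/* NULL sigevent requests the default SIGALRM notification */
		if (timer_create(CLOCK_MONOTONIC_RAW, NULL, &timer) == -1)
			printf("timer_create: %s (expected EOPNOTSUPP)\n",
			       strerror(errno));
		else
			puts("unexpected: timer created");
		return 0;
	}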
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -25,6 +25,17 @@
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>

+
+/*
+ * Initialize a new task whose father had been ptraced.
+ *
+ * Called from copy_process().
+ */
+void ptrace_fork(struct task_struct *child, unsigned long clone_flags)
+{
+	arch_ptrace_fork(child, clone_flags);
+}
+
 /*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
@@ -72,6 +83,7 @@ void __ptrace_unlink(struct task_struct *child)
 	child->parent = child->real_parent;
 	list_del_init(&child->ptrace_entry);

+	arch_ptrace_untrace(child);
 	if (task_is_traced(child))
 		ptrace_untrace(child);
 }
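Note: ptrace_fork() and __ptrace_unlink() now funnel into arch hooks so an architecture (here, the x86 branch-trace-store work) can set up or tear down per-task hardware tracing state. Architectures that do not care presumably see empty defaults; a sketch of that fallback pattern in the linux/ptrace.h style, not a verbatim copy of the header:

	#ifndef arch_ptrace_fork
	/* inherit/initialize per-task hardware tracing state at fork */
	#define arch_ptrace_fork(child, clone_flags)	do { } while (0)
	#endif

	#ifndef arch_ptrace_untrace
	/* release hardware tracing state when the tracer detaches */
	#define arch_ptrace_untrace(task)		do { } while (0)
	#endif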
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2324,7 +2324,7 @@ out_activate:
 	success = 1;

 out_running:
-	trace_sched_wakeup(rq, p);
+	trace_sched_wakeup(rq, p, success);
 	check_preempt_curr(rq, p, sync);

 	p->state = TASK_RUNNING;
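Note: the sched_wakeup tracepoint gains a success argument (1 when the wakeup actually transitioned the task, 0 when it was already on the runqueue), so every attached probe must adopt the new signature; the in-tree tracers are updated in the hunks below. A sketch of an updated out-of-tree probe; pr_debug() and the generated register_trace_sched_wakeup() exist in this kernel, while the probe itself is hypothetical:

	static void my_wakeup_probe(struct rq *rq, struct task_struct *p,
				    int success)
	{
		if (!success)
			return;	/* task was on the runqueue already */
		pr_debug("woke pid %d\n", p->pid);
	}

	/* attach with: register_trace_sched_wakeup(my_wakeup_probe); */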
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -838,6 +838,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 	 * back to us). This allows us to do a simple loop to
 	 * assign the commit to the tail.
 	 */
+ again:
 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
 		cpu_buffer->commit_page->page->commit =
 			cpu_buffer->commit_page->write;
@@ -853,6 +854,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 			cpu_buffer->commit_page->write;
 		barrier();
 	}
+
+	/* again, keep gcc from optimizing */
+	barrier();
+
+	/*
+	 * If an interrupt came in just after the first while loop
+	 * and pushed the tail page forward, we will be left with
+	 * a dangling commit that will never go forward.
+	 */
+	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+		goto again;
 }

 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
@@ -950,12 +962,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *head_page, *reader_page;
+	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
 	unsigned long tail, write;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;

+	commit_page = cpu_buffer->commit_page;
+	/* we just need to protect against interrupts */
+	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 	tail = write - length;
@@ -981,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		 * it all the way around the buffer, bail, and warn
 		 * about it.
 		 */
-		if (unlikely(next_page == cpu_buffer->commit_page)) {
+		if (unlikely(next_page == commit_page)) {
 			WARN_ON_ONCE(1);
 			goto out_unlock;
 		}
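Note: snapshotting commit_page into a local before touching tail_page closes a race: an interrupt landing between the two reads can push both the tail and the commit page forward, and comparing next_page against a re-read cpu_buffer->commit_page could then pass or trip the wrap check spuriously. The barrier() only has to stop the compiler from re-reading the pointer. A self-contained sketch of the snapshot-then-barrier idiom (illustrative types, not the ring-buffer structures):

	#define barrier() __asm__ __volatile__("" ::: "memory")

	struct bpage { struct bpage *next; };

	static struct bpage *commit_page;	/* may advance from IRQ context */
	static struct bpage *tail_page;

	static int would_wrap(void)
	{
		struct bpage *commit = commit_page;	/* snapshot first */

		barrier();	/* compiler must not re-read commit_page below */
		return tail_page->next == commit;	/* compare vs. snapshot */
	}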
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -679,6 +679,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
 	ftrace_enable_cpu();
 }

+void tracing_reset_online_cpus(struct trace_array *tr)
+{
+	int cpu;
+
+	tr->time_start = ftrace_now(tr->cpu);
+
+	for_each_online_cpu(cpu)
+		tracing_reset(tr, cpu);
+}
+
 #define SAVED_CMDLINES 128
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
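Note: tracing_reset_online_cpus() folds the near-identical per-tracer reset loops (deleted one by one in the hunks below) into a single helper. Since its signature matches struct tracer's reset callback, it can be called inline or plugged in directly; a sketch with a hypothetical tracer:

	/* hypothetical tracer using the new helper */
	static int example_trace_init(struct trace_array *tr)
	{
		tracing_reset_online_cpus(tr);	/* reset every online CPU buffer */
		return 0;
	}

	/* or, as trace_boot.c does below: .reset = tracing_reset_online_cpus */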
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -374,6 +374,7 @@ struct trace_iterator {
 int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
+void tracing_reset_online_cpus(struct trace_array *tr);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -37,16 +37,6 @@ void disable_boot_trace(void)
 	tracing_stop_sched_switch_record();
 }

-static void reset_boot_trace(struct trace_array *tr)
-{
-	int cpu;
-
-	tr->time_start = ftrace_now(tr->cpu);
-
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-}
-
 static int boot_trace_init(struct trace_array *tr)
 {
 	int cpu;
@@ -130,7 +120,7 @@ struct tracer boot_tracer __read_mostly =
 {
 	.name		= "initcall",
 	.init		= boot_trace_init,
-	.reset		= reset_boot_trace,
+	.reset		= tracing_reset_online_cpus,
 	.print_line	= initcall_print_line,
 };

--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,20 +16,10 @@

 #include "trace.h"

-static void function_reset(struct trace_array *tr)
-{
-	int cpu;
-
-	tr->time_start = ftrace_now(tr->cpu);
-
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-}
-
 static void start_function_trace(struct trace_array *tr)
 {
 	tr->cpu = get_cpu();
-	function_reset(tr);
+	tracing_reset_online_cpus(tr);
 	put_cpu();

 	tracing_start_cmdline_record();
@@ -55,7 +45,7 @@ static void function_trace_reset(struct trace_array *tr)

 static void function_trace_start(struct trace_array *tr)
 {
-	function_reset(tr);
+	tracing_reset_online_cpus(tr);
 }

 static struct tracer function_trace __read_mostly =
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -25,16 +25,6 @@ static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
 #define this_buffer per_cpu(buffer, smp_processor_id())


-static void bts_trace_reset(struct trace_array *tr)
-{
-	int cpu;
-
-	tr->time_start = ftrace_now(tr->cpu);
-
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-}
-
 static void bts_trace_start_cpu(void *arg)
 {
 	if (this_tracer)
@@ -54,7 +44,7 @@ static void bts_trace_start(struct trace_array *tr)
 {
 	int cpu;

-	bts_trace_reset(tr);
+	tracing_reset_online_cpus(tr);

 	for_each_cpu_mask(cpu, cpu_possible_map)
 		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
@@ -78,7 +68,7 @@ static void bts_trace_stop(struct trace_array *tr)

 static int bts_trace_init(struct trace_array *tr)
 {
-	bts_trace_reset(tr);
+	tracing_reset_online_cpus(tr);
 	bts_trace_start(tr);

 	return 0;
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -22,14 +22,10 @@ static unsigned long prev_overruns;

 static void mmio_reset_data(struct trace_array *tr)
 {
-	int cpu;
-
 	overrun_detected = false;
 	prev_overruns = 0;
-	tr->time_start = ftrace_now(tr->cpu);

-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
+	tracing_reset_online_cpus(tr);
 }

 static int mmio_trace_init(struct trace_array *tr)
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -49,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 }

 static void
-probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
@@ -72,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 	local_irq_restore(flags);
 }

-static void sched_switch_reset(struct trace_array *tr)
-{
-	int cpu;
-
-	tr->time_start = ftrace_now(tr->cpu);
-
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-}
-
 static int tracing_sched_register(void)
 {
 	int ret;
@@ -197,7 +187,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)

 static void start_sched_trace(struct trace_array *tr)
 {
-	sched_switch_reset(tr);
+	tracing_reset_online_cpus(tr);
 	tracing_start_sched_switch_record();
 }

@@ -221,7 +211,7 @@ static void sched_switch_trace_reset(struct trace_array *tr)

 static void sched_switch_trace_start(struct trace_array *tr)
 {
-	sched_switch_reset(tr);
+	tracing_reset_online_cpus(tr);
 	tracing_start_sched_switch();
 }

--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -211,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
 }

 static void
-probe_wakeup(struct rq *rq, struct task_struct *p)
+probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 {
 	int cpu = smp_processor_id();
 	unsigned long flags;
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -234,20 +234,10 @@ static void stop_stack_timers(void)
 		stop_stack_timer(cpu);
 }

-static void stack_reset(struct trace_array *tr)
-{
-	int cpu;
-
-	tr->time_start = ftrace_now(tr->cpu);
-
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-}
-
 static void start_stack_trace(struct trace_array *tr)
 {
 	mutex_lock(&sample_timer_lock);
-	stack_reset(tr);
+	tracing_reset_online_cpus(tr);
 	start_stack_timers();
 	tracer_enabled = 1;
 	mutex_unlock(&sample_timer_lock);