tracing: Avoid soft lockup in trace_pipe
running following commands: # enable the binary option echo 1 > ./options/bin # disable context info option echo 0 > ./options/context-info # tracing only events echo 1 > ./events/enable cat trace_pipe plus forcing system to generate many tracing events, is causing lockup (in NON preemptive kernels) inside tracing_read_pipe function. The issue is also easily reproduced by running ltp stress test. (ftrace_stress_test.sh) The reasons are: - bin/hex/raw output functions for events are set to trace_nop_print function, which prints nothing and returns TRACE_TYPE_HANDLED value - LOST EVENT trace do not handle trace_seq overflow These reasons force the while loop in tracing_read_pipe function never to break. The attached patch fixes handling of lost event trace, and changes trace_nop_print to print minimal info, which is needed for the correct tracing_read_pipe processing. v2 changes: - omit the cond_resched changes by trace_nop_print changes - WARN changed to WARN_ONCE and added info to be able to find out the culprit v3 changes: - make more accurate patch comment Signed-off-by: Jiri Olsa <jolsa@redhat.com> LKML-Reference: <20110325110518.GC1922@jolsa.brq.redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
1813dc3776
commit
ee5e51f51b
@ -2013,9 +2013,10 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
|
|||||||
{
|
{
|
||||||
enum print_line_t ret;
|
enum print_line_t ret;
|
||||||
|
|
||||||
if (iter->lost_events)
|
if (iter->lost_events &&
|
||||||
trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
|
!trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
|
||||||
iter->cpu, iter->lost_events);
|
iter->cpu, iter->lost_events))
|
||||||
|
return TRACE_TYPE_PARTIAL_LINE;
|
||||||
|
|
||||||
if (iter->trace && iter->trace->print_line) {
|
if (iter->trace && iter->trace->print_line) {
|
||||||
ret = iter->trace->print_line(iter);
|
ret = iter->trace->print_line(iter);
|
||||||
@ -3229,6 +3230,14 @@ waitagain:
|
|||||||
|
|
||||||
if (iter->seq.len >= cnt)
|
if (iter->seq.len >= cnt)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Setting the full flag means we reached the trace_seq buffer
|
||||||
|
* size and we should leave by partial output condition above.
|
||||||
|
* One of the trace_seq_* functions is not used properly.
|
||||||
|
*/
|
||||||
|
WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
|
||||||
|
iter->ent->type);
|
||||||
}
|
}
|
||||||
trace_access_unlock(iter->cpu_file);
|
trace_access_unlock(iter->cpu_file);
|
||||||
trace_event_read_unlock();
|
trace_event_read_unlock();
|
||||||
|
@ -830,6 +830,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event);
|
|||||||
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
|
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
|
||||||
struct trace_event *event)
|
struct trace_event *event)
|
||||||
{
|
{
|
||||||
|
if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
|
||||||
|
return TRACE_TYPE_PARTIAL_LINE;
|
||||||
|
|
||||||
return TRACE_TYPE_HANDLED;
|
return TRACE_TYPE_HANDLED;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user