This includes two more fixes:

- Change idx variable in DO_TRACE macro to __idx to avoid name conflicts.
    A kvm event had "idx" as a parameter and it confused the macro.
 
  - Fix a race where interrupts would be traced when set_graph_function was set.
    The previous patch set increased a race window that tricked the function graph
    tracer to think it should trace interrupts when it really should not have.
    The bug has been there before, but was seldom hit. Only the last patch series
    made it more common.
 -----BEGIN PGP SIGNATURE-----
 
 iIoEABYIADIWIQRRSw7ePDh/lE+zeZMp5XQQmuv6qgUCXACrFhQccm9zdGVkdEBn
 b29kbWlzLm9yZwAKCRAp5XQQmuv6qgUuAP9zZ0+PjDIXrkZATogEsMJ1OZKy5AiK
 mwT7S85/ouOeCQEAi5JUcSfwB0caq2nYB1GKOKNfDH0ffeVR4wNykcYjngM=
 =gjwC
 -----END PGP SIGNATURE-----

Merge tag 'trace-v4.20-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull more tracing fixes from Steven Rostedt:
 "Two more fixes:

   - Change idx variable in DO_TRACE macro to __idx to avoid name
     conflicts. A kvm event had "idx" as a parameter and it confused the
     macro.

   - Fix a race where interrupts would be traced when set_graph_function
     was set. The previous patch set increased a race window that
     tricked the function graph tracer to think it should trace
     interrupts when it really should not have.

     The bug has been there before, but was seldom hit. Only the last
     patch series made it more common"

* tag 'trace-v4.20-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing/fgraph: Fix set_graph_function from showing interrupts
  tracepoint: Use __idx instead of idx in DO_TRACE macro to make it unique
This commit is contained in:
Linus Torvalds 2018-11-30 10:40:11 -08:00
commit 49afe66143
5 changed files with 65 additions and 6 deletions

View File

@@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
struct tracepoint_func *it_func_ptr; \ struct tracepoint_func *it_func_ptr; \
void *it_func; \ void *it_func; \
void *__data; \ void *__data; \
int __maybe_unused idx = 0; \ int __maybe_unused __idx = 0; \
\ \
if (!(cond)) \ if (!(cond)) \
return; \ return; \
@@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* doesn't work from the idle path. \ * doesn't work from the idle path. \
*/ \ */ \
if (rcuidle) { \ if (rcuidle) { \
idx = srcu_read_lock_notrace(&tracepoint_srcu); \ __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
rcu_irq_enter_irqson(); \ rcu_irq_enter_irqson(); \
} \ } \
\ \
@@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
\ \
if (rcuidle) { \ if (rcuidle) { \
rcu_irq_exit_irqson(); \ rcu_irq_exit_irqson(); \
srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
} \ } \
\ \
preempt_enable_notrace(); \ preempt_enable_notrace(); \

View File

@@ -512,12 +512,44 @@ enum {
* can only be modified by current, we can reuse trace_recursion. * can only be modified by current, we can reuse trace_recursion.
*/ */
TRACE_IRQ_BIT, TRACE_IRQ_BIT,
/* Set if the function is in the set_graph_function file */
TRACE_GRAPH_BIT,
/*
* In the very unlikely case that an interrupt came in
* at a start of graph tracing, and we want to trace
* the function in that interrupt, the depth can be greater
* than zero, because of the preempted start of a previous
* trace. In an even more unlikely case, depth could be 2
* if a softirq interrupted the start of graph tracing,
* followed by an interrupt preempting a start of graph
* tracing in the softirq, and depth can even be 3
* if an NMI came in at the start of an interrupt function
* that preempted a softirq start of a function that
* preempted normal context!!!! Luckily, it can't be
* greater than 3, so the next two bits are a mask
* of what the depth is when we set TRACE_GRAPH_BIT
*/
TRACE_GRAPH_DEPTH_START_BIT,
TRACE_GRAPH_DEPTH_END_BIT,
}; };
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
#define trace_recursion_depth() \
(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
do { \
current->trace_recursion &= \
~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
current->trace_recursion |= \
((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
} while (0)
#define TRACE_CONTEXT_BITS 4 #define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT #define TRACE_FTRACE_START TRACE_FTRACE_BIT
@@ -843,8 +875,9 @@ extern void __trace_graph_return(struct trace_array *tr,
extern struct ftrace_hash *ftrace_graph_hash; extern struct ftrace_hash *ftrace_graph_hash;
extern struct ftrace_hash *ftrace_graph_notrace_hash; extern struct ftrace_hash *ftrace_graph_notrace_hash;
static inline int ftrace_graph_addr(unsigned long addr) static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{ {
unsigned long addr = trace->func;
int ret = 0; int ret = 0;
preempt_disable_notrace(); preempt_disable_notrace();
@@ -855,6 +888,14 @@ static inline int ftrace_graph_addr(unsigned long addr)
} }
if (ftrace_lookup_ip(ftrace_graph_hash, addr)) { if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
/*
* This needs to be cleared on the return functions
* when the depth is zero.
*/
trace_recursion_set(TRACE_GRAPH_BIT);
trace_recursion_set_depth(trace->depth);
/* /*
* If no irqs are to be traced, but a set_graph_function * If no irqs are to be traced, but a set_graph_function
* is set, and called by an interrupt handler, we still * is set, and called by an interrupt handler, we still
@@ -872,6 +913,13 @@ out:
return ret; return ret;
} }
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{
if (trace_recursion_test(TRACE_GRAPH_BIT) &&
trace->depth == trace_recursion_depth())
trace_recursion_clear(TRACE_GRAPH_BIT);
}
static inline int ftrace_graph_notrace_addr(unsigned long addr) static inline int ftrace_graph_notrace_addr(unsigned long addr)
{ {
int ret = 0; int ret = 0;
@@ -885,7 +933,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
return ret; return ret;
} }
#else #else
static inline int ftrace_graph_addr(unsigned long addr) static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{ {
return 1; return 1;
} }
@@ -894,6 +942,8 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
{ {
return 0; return 0;
} }
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
extern unsigned int fgraph_max_depth; extern unsigned int fgraph_max_depth;
@@ -901,7 +951,8 @@ extern unsigned int fgraph_max_depth;
static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace) static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{ {
/* trace it when it is-nested-in or is a function enabled. */ /* trace it when it is-nested-in or is a function enabled. */
return !(trace->depth || ftrace_graph_addr(trace->func)) || return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
ftrace_graph_addr(trace)) ||
(trace->depth < 0) || (trace->depth < 0) ||
(fgraph_max_depth && trace->depth >= fgraph_max_depth); (fgraph_max_depth && trace->depth >= fgraph_max_depth);
} }

View File

@@ -509,6 +509,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
int cpu; int cpu;
int pc; int pc;
ftrace_graph_addr_finish(trace);
local_irq_save(flags); local_irq_save(flags);
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu); data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@ -532,6 +534,8 @@ void set_graph_array(struct trace_array *tr)
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace) static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{ {
ftrace_graph_addr_finish(trace);
if (tracing_thresh && if (tracing_thresh &&
(trace->rettime - trace->calltime < tracing_thresh)) (trace->rettime - trace->calltime < tracing_thresh))
return; return;

View File

@@ -208,6 +208,8 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
unsigned long flags; unsigned long flags;
int pc; int pc;
ftrace_graph_addr_finish(trace);
if (!func_prolog_dec(tr, &data, &flags)) if (!func_prolog_dec(tr, &data, &flags))
return; return;

View File

@@ -270,6 +270,8 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
unsigned long flags; unsigned long flags;
int pc; int pc;
ftrace_graph_addr_finish(trace);
if (!func_prolog_preempt_disable(tr, &data, &pc)) if (!func_prolog_preempt_disable(tr, &data, &pc))
return; return;