Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Misc kernel and tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools lib traceevent: Fix conversion of pointer to integer of different size
  perf/trace: Properly use u64 to hold event_id
  perf: Remove fragile swevent hlist optimization
  ftrace, perf: Avoid infinite event generation loop
  tools lib traceevent: Fix use of multiple options in processing field
  perf header: Fix possible memory leaks in process_group_desc()
  perf header: Fix bogus group name
  perf tools: Tag thread comm as overriden
commit e321ae4c20
@@ -71,6 +71,17 @@ DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
  */
 DEFINE_IRQ_VECTOR_EVENT(irq_work);
 
+/*
+ * We must dis-allow sampling irq_work_exit() because perf event sampling
+ * itself can cause irq_work, which would lead to an infinite loop;
+ *
+ *  1) irq_work_exit happens
+ *  2) generates perf sample
+ *  3) generates irq_work
+ *  4) goto 1
+ */
+TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
+
 /*
  * call_function - called when entering/exiting a call function interrupt
  * vector handler
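The new TRACE_EVENT_PERF_PERM() hook above refuses sampling perf events on irq_work_exit. As a rough userspace illustration (not part of this commit; the debugfs path is the conventional location and may differ, and opening tracepoint events may additionally need sufficient privileges), opening the tracepoint with a non-zero sample_period should now fail with EPERM, while a plain counting event is still allowed:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Read a tracepoint id from its debugfs "id" file. */
static long tracepoint_id(const char *path)
{
        FILE *f = fopen(path, "r");
        long id = -1;

        if (f && fscanf(f, "%ld", &id) != 1)
                id = -1;
        if (f)
                fclose(f);
        return id;
}

int main(void)
{
        struct perf_event_attr attr;
        long id = tracepoint_id("/sys/kernel/debug/tracing/events/irq_vectors/irq_work_exit/id");
        int fd;

        if (id < 0)
                return 1;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;
        attr.sample_period = 1;  /* non-zero period -> is_sampling_event() is true */

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                perror("perf_event_open");  /* expected: EPERM with this fix applied */
        else
                close(fd);
        return 0;
}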
@@ -248,6 +248,9 @@ struct ftrace_event_call {
 #ifdef CONFIG_PERF_EVENTS
         int perf_refcount;
         struct hlist_head __percpu *perf_events;
+
+        int (*perf_perm)(struct ftrace_event_call *,
+                         struct perf_event *);
 #endif
 };
 
@@ -317,6 +320,19 @@ struct ftrace_event_file {
         } \
         early_initcall(trace_init_flags_##name);
 
+#define __TRACE_EVENT_PERF_PERM(name, expr...) \
+        static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+                                    struct perf_event *p_event) \
+        { \
+                return ({ expr; }); \
+        } \
+        static int __init trace_init_perf_perm_##name(void) \
+        { \
+                event_##name.perf_perm = &perf_perm_##name; \
+                return 0; \
+        } \
+        early_initcall(trace_init_perf_perm_##name);
+
 #define PERF_MAX_TRACE_SIZE 2048
 
 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
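For reference, with the definition above, the TRACE_EVENT_PERF_PERM(irq_work_exit, ...) line added in the first hunk expands to roughly the following (whitespace adjusted; not new code, just the macro written out):

static int perf_perm_irq_work_exit(struct ftrace_event_call *tp_event,
                                   struct perf_event *p_event)
{
        return ({ is_sampling_event(p_event) ? -EPERM : 0; });
}
static int __init trace_init_perf_perm_irq_work_exit(void)
{
        event_irq_work_exit.perf_perm = &perf_perm_irq_work_exit;
        return 0;
}
early_initcall(trace_init_perf_perm_irq_work_exit);

The generated callback is then consulted by perf_trace_event_perm() before the event is allowed, as shown in the trace_event_perf hunk further down.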
@@ -267,6 +267,8 @@ static inline void tracepoint_synchronize_unregister(void)
 
 #define TRACE_EVENT_FLAGS(event, flag)
 
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
 #endif /* DECLARE_TRACE */
 
 #ifndef TRACE_EVENT
@@ -399,4 +401,6 @@ static inline void tracepoint_synchronize_unregister(void)
 
 #define TRACE_EVENT_FLAGS(event, flag)
 
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
 #endif /* ifdef TRACE_EVENT (see note above) */
@@ -90,6 +90,10 @@
 #define TRACE_EVENT_FLAGS(name, value) \
         __TRACE_EVENT_FLAGS(name, value)
 
+#undef TRACE_EVENT_PERF_PERM
+#define TRACE_EVENT_PERF_PERM(name, expr...) \
+        __TRACE_EVENT_PERF_PERM(name, expr)
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 
@@ -140,6 +144,9 @@
 #undef TRACE_EVENT_FLAGS
 #define TRACE_EVENT_FLAGS(event, flag)
 
+#undef TRACE_EVENT_PERF_PERM
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
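The pair of stub and real definitions above follows the usual multi-pass trick in the tracing headers: the same event description is included repeatedly, and each pass redefines the macros to emit different code, with a no-op stub for passes that do not care. A minimal standalone C sketch of that pattern (the event names are taken from this diff purely for illustration; this is not the kernel's actual machinery, only the shape of it):

#include <stdio.h>

/* The event list plays the role of the re-included trace header. */
#define EVENT_LIST \
        EVENT(irq_work_exit) \
        EVENT(call_function)

/* Pass 1: expand the list into an enum of event ids. */
#define EVENT(name) EV_##name,
enum event_id { EVENT_LIST EV_MAX };
#undef EVENT

/* Pass 2: redefine the macro and expand the same list into a name table. */
#define EVENT(name) #name,
static const char *event_names[] = { EVENT_LIST };
#undef EVENT

int main(void)
{
        for (int i = 0; i < EV_MAX; i++)
                printf("%d: %s\n", i, event_names[i]);
        return 0;
}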
@@ -5680,11 +5680,6 @@ static void swevent_hlist_put(struct perf_event *event)
 {
         int cpu;
 
-        if (event->cpu != -1) {
-                swevent_hlist_put_cpu(event, event->cpu);
-                return;
-        }
-
         for_each_possible_cpu(cpu)
                 swevent_hlist_put_cpu(event, cpu);
 }
@@ -5718,9 +5713,6 @@ static int swevent_hlist_get(struct perf_event *event)
         int err;
         int cpu, failed_cpu;
 
-        if (event->cpu != -1)
-                return swevent_hlist_get_cpu(event, event->cpu);
-
         get_online_cpus();
         for_each_possible_cpu(cpu) {
                 err = swevent_hlist_get_cpu(event, cpu);
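With the event->cpu != -1 shortcut removed, swevent_hlist_get()/put() are symmetric again: acquire the hlist on every possible CPU and, on failure, roll back the CPUs already done (the failed_cpu logic visible above). A standalone userspace sketch of that acquire-all-or-roll-back shape, with the hotplug locking and hlist details omitted and the names chosen for illustration:

#include <errno.h>
#include <stdlib.h>

#define NR_CPUS 4

static void *hlist[NR_CPUS];

static int hlist_get_cpu(int cpu)
{
        if (!hlist[cpu])
                hlist[cpu] = calloc(1, 64);
        return hlist[cpu] ? 0 : -ENOMEM;
}

static void hlist_put_cpu(int cpu)
{
        free(hlist[cpu]);
        hlist[cpu] = NULL;
}

/* Acquire on every "cpu"; on failure, release the ones already acquired. */
static int hlist_get(void)
{
        int cpu, failed_cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                int err = hlist_get_cpu(cpu);

                if (err) {
                        failed_cpu = cpu;
                        for (cpu = 0; cpu < failed_cpu; cpu++)
                                hlist_put_cpu(cpu);
                        return err;
                }
        }
        return 0;
}

int main(void)
{
        if (hlist_get() == 0) {
                for (int cpu = 0; cpu < NR_CPUS; cpu++)
                        hlist_put_cpu(cpu);
        }
        return 0;
}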
@@ -24,6 +24,12 @@ static int total_ref_count;
 static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
                                  struct perf_event *p_event)
 {
+        if (tp_event->perf_perm) {
+                int ret = tp_event->perf_perm(tp_event, p_event);
+                if (ret)
+                        return ret;
+        }
+
         /* The ftrace function trace is allowed only for root. */
         if (ftrace_event_is_function(tp_event) &&
             perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
@@ -173,7 +179,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 int perf_trace_init(struct perf_event *p_event)
 {
         struct ftrace_event_call *tp_event;
-        int event_id = p_event->attr.config;
+        u64 event_id = p_event->attr.config;
         int ret = -EINVAL;
 
         mutex_lock(&event_mutex);
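perf_event_attr.config is a __u64, so holding it in an int truncates any event id that does not fit in 32 bits before it is ever compared against the registered events. A tiny standalone illustration of the truncation (the config value here is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t config = 0x100000001ULL;  /* made-up event id above 32 bits */
        int as_int = (int)config;          /* old code: silently truncated to 1 */
        uint64_t as_u64 = config;          /* fixed code: preserved */

        printf("int: %d  u64: %llu\n", as_int, (unsigned long long)as_u64);
        return 0;
}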
@@ -1606,6 +1606,24 @@ process_arg(struct event_format *event, struct print_arg *arg, char **tok)
 static enum event_type
 process_op(struct event_format *event, struct print_arg *arg, char **tok);
 
+/*
+ * For __print_symbolic() and __print_flags, we need to completely
+ * evaluate the first argument, which defines what to print next.
+ */
+static enum event_type
+process_field_arg(struct event_format *event, struct print_arg *arg, char **tok)
+{
+        enum event_type type;
+
+        type = process_arg(event, arg, tok);
+
+        while (type == EVENT_OP) {
+                type = process_op(event, arg, tok);
+        }
+
+        return type;
+}
+
 static enum event_type
 process_cond(struct event_format *event, struct print_arg *top, char **tok)
 {
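The new helper matters when the first argument of __print_flags()/__print_symbolic() is an expression rather than a bare field: the operator has to be folded into the field argument before the delimiter and the value/name pairs are parsed. A hypothetical TP_printk() fragment of that shape, shown only to illustrate the kind of format string involved (it is not from this commit):

TP_printk("state=%s",
          __print_symbolic(__entry->flags & 0x3,
                           { 0, "IDLE"    },
                           { 1, "RUNNING" },
                           { 2, "BLOCKED" }))

The two call-site changes below switch process_flags() and process_symbols() over to the helper.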
@@ -2371,7 +2389,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
                 goto out_free;
         }
 
-        type = process_arg(event, field, &token);
+        type = process_field_arg(event, field, &token);
 
         /* Handle operations in the first argument */
         while (type == EVENT_OP)
@@ -2424,7 +2442,8 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
                 goto out_free;
         }
 
-        type = process_arg(event, field, &token);
+        type = process_field_arg(event, field, &token);
+
         if (test_type_token(type, token, EVENT_DELIM, ","))
                 goto out_free_field;
 
@@ -3446,7 +3465,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                  * is in the bottom half of the 32 bit field.
                  */
                 offset &= 0xffff;
-                val = (unsigned long long)(data + offset);
+                val = (unsigned long long)((unsigned long)data + offset);
                 break;
         default: /* not sure what to do there */
                 return 0;
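The double cast avoids a "cast from pointer to integer of different size" warning on 32-bit builds, where void * is 32 bits but the destination type is 64: casting through unsigned long (the pointer-sized integer) first and then widening is clean on both 32- and 64-bit. A standalone before/after illustration:

#include <stdio.h>

int main(void)
{
        char buf[32];
        void *data = buf;
        int offset = 4;

        /* old: (unsigned long long)(data + offset)
         *      -> pointer-to-integer-of-different-size warning on 32-bit */
        unsigned long long val = (unsigned long long)((unsigned long)data + offset);

        printf("%llx\n", val);
        return 0;
}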
@@ -2078,8 +2078,10 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused,
                 if (evsel->idx == (int) desc[i].leader_idx) {
                         evsel->leader = evsel;
                         /* {anon_group} is a dummy name */
-                        if (strcmp(desc[i].name, "{anon_group}"))
+                        if (strcmp(desc[i].name, "{anon_group}")) {
                                 evsel->group_name = desc[i].name;
+                                desc[i].name = NULL;
+                        }
                         evsel->nr_members = desc[i].nr_members;
 
                         if (i >= nr_groups || nr > 0) {
@@ -2105,7 +2107,7 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused,
 
         ret = 0;
 out_free:
-        while ((int) --i >= 0)
+        for (i = 0; i < nr_groups; i++)
                 free(desc[i].name);
         free(desc);
 
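The two hunks above work together: once a group name is adopted as evsel->group_name, the desc[i].name slot is cleared, which lets a single unconditional cleanup loop free exactly the names that were not handed off, on both the success and error paths (free(NULL) is a no-op). The same ownership-transfer pattern as a standalone sketch, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct owner { char *name; };

/* Hand each usable name to its owner and clear the staging slot, so one
 * unconditional cleanup loop frees only the names that were not adopted. */
static void adopt_names(struct owner *owners, char **staging, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (staging[i] && strcmp(staging[i], "{anon_group}")) {
                        owners[i].name = staging[i];  /* ownership moves */
                        staging[i] = NULL;            /* skip in cleanup */
                }
        }

        for (i = 0; i < n; i++)  /* free(NULL) is a no-op */
                free(staging[i]);
}

int main(void)
{
        char *staging[2] = { strdup("{anon_group}"), strdup("cache-misses") };
        struct owner owners[2] = { { NULL }, { NULL } };

        adopt_names(owners, staging, 2);
        printf("adopted: %s\n", owners[1].name ? owners[1].name : "(none)");
        free(owners[1].name);
        return 0;
}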
@@ -70,14 +70,13 @@ int thread__set_comm(struct thread *thread, const char *str, u64 timestamp)
         /* Override latest entry if it had no specific time coverage */
         if (!curr->start) {
                 comm__override(curr, str, timestamp);
-                return 0;
+        } else {
+                new = comm__new(str, timestamp);
+                if (!new)
+                        return -ENOMEM;
+                list_add(&new->list, &thread->comm_list);
         }
 
-        new = comm__new(str, timestamp);
-        if (!new)
-                return -ENOMEM;
-
-        list_add(&new->list, &thread->comm_list);
         thread->comm_set = true;
 
         return 0;