perf_events: Generalize use of event_filter_match()

Replace all open-coded occurrences of:
	event->cpu != -1 && event->cpu != smp_processor_id()
by a call to:
	!event_filter_match(event)

This makes the code more consistent and will make the cgroup
patch smaller.
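
For reference, event_filter_match() is an existing helper in
kernel/perf_event.c; at this point in the tree it amounts to roughly
the following (a sketch, not necessarily the verbatim definition):

	static inline int
	event_filter_match(struct perf_event *event)
	{
		/* match if the event is unbound (cpu == -1) or bound to this CPU */
		return event->cpu == -1 || event->cpu == smp_processor_id();
	}

so !event_filter_match(event) is equivalent to the open-coded cpu
check being removed at each call site.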

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4d220593.2308e30a.48c5.ffff8ae9@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 5632ab12e9
parent 0b3fcf178d
Author:    Stephane Eranian <eranian@google.com>
Date:      2011-01-03 18:20:01 +02:00
Committer: Ingo Molnar <mingo@elte.hu>

@@ -949,7 +949,7 @@ static void __perf_install_in_context(void *info)
 	add_event_to_ctx(event, ctx);
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 	/*
@@ -1094,7 +1094,7 @@ static void __perf_event_enable(void *info)
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 	/*
@@ -1441,7 +1441,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 		if (group_can_go_on(event, cpuctx, 1))
@@ -1473,7 +1473,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		/*
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1700,7 +1700,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 		hwc = &event->hw;
@@ -3899,7 +3899,7 @@ static int perf_event_task_match(struct perf_event *event)
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 	if (event->attr.comm || event->attr.mmap ||
@@ -4036,7 +4036,7 @@ static int perf_event_comm_match(struct perf_event *event)
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 	if (event->attr.comm)
@@ -4184,7 +4184,7 @@ static int perf_event_mmap_match(struct perf_event *event,
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 	if ((!executable && event->attr.mmap_data) ||