perf, x86: Avoid double disable on throttle vs ioctl(PERF_IOC_DISABLE)
Calling ioctl(PERF_EVENT_IOC_DISABLE) on a throttled counter would result in a double disable. Cure this by using x86_pmu_{start,stop} for throttle/unthrottle, and by teaching x86_pmu_stop() to check ->active_mask.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 71e2d28280
parent c08053e627
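For context, the core of the cure is making x86_pmu_stop() idempotent. Below is a minimal user-space sketch of that pattern; the bitmap helper, hw_disable() and the main() harness are invented stand-ins for illustration, not the kernel's implementations.

#include <stdio.h>
#include <stdbool.h>

static unsigned long active_mask;	/* one bit per hardware counter */

/* Illustrative stand-in for the kernel's __test_and_clear_bit(). */
static bool test_and_clear_bit(int nr, unsigned long *addr)
{
	unsigned long bit = 1UL << nr;
	bool was_set = (*addr & bit) != 0;

	*addr &= ~bit;
	return was_set;
}

/* Stand-in for x86_pmu.disable(): pretend to touch the hardware. */
static void hw_disable(int idx)
{
	printf("counter %d: disabled in hardware\n", idx);
}

/*
 * Model of the patched x86_pmu_stop(): testing and clearing the
 * active_mask bit makes stop idempotent, so a throttle in the NMI
 * handler followed by ioctl(PERF_EVENT_IOC_DISABLE) disables the
 * counter exactly once.
 */
static void pmu_stop(int idx)
{
	if (!test_and_clear_bit(idx, &active_mask))
		return;	/* already stopped: second call is a no-op */

	hw_disable(idx);
}

int main(void)
{
	int idx = 2;

	active_mask |= 1UL << idx;	/* event started on counter 2 */

	pmu_stop(idx);	/* throttle path: really disables */
	pmu_stop(idx);	/* later PERF_EVENT_IOC_DISABLE: no-op */
	return 0;
}

The interrupt-handler hunks below then route overflow throttling through x86_pmu_stop(), so the throttle path and the ioctl path share this single guarded disable.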
arch/x86/kernel/cpu/perf_event.c
@@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event)
 
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
-				cpuc->events[hwc->idx] != event))
-		return;
-
-	x86_pmu.enable(event);
+	int ret = x86_pmu_start(event);
+	WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
-	/*
-	 * Must be done before we disable, otherwise the nmi handler
-	 * could reenable again:
-	 */
-	__clear_bit(idx, cpuc->active_mask);
+	if (!__test_and_clear_bit(idx, cpuc->active_mask))
+		return;
+
 	x86_pmu.disable(event);
 
 	/*
@@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu.disable(event);
+			x86_pmu_stop(event);
 	}
 
 	if (handled)
arch/x86/kernel/cpu/perf_event_intel.c
@@ -774,7 +774,7 @@ again:
 			data.period = event->hw.last_period;
 
 			if (perf_event_overflow(event, 1, &data, regs))
-				intel_pmu_disable_event(event);
+				x86_pmu_stop(event);
 		}
 
 		intel_pmu_ack_status(ack);