x86, perfcounters: read out MSR_CORE_PERF_GLOBAL_STATUS with counters disabled
Impact: make perfcounter NMI and IRQ sequence more robust

Make __smp_perf_counter_interrupt() a bit more conservative: first disable all counters, then read out the status. Most invocations are because there are real events, so there's no performance impact.

Code flow gets a bit simpler as well this way.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
241771ef01
commit
87b9cf4623
@@ -383,18 +383,16 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
|
||||
struct cpu_hw_counters *cpuc;
|
||||
u64 ack, status;
|
||||
|
||||
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
|
||||
if (!status) {
|
||||
ack_APIC_irq();
|
||||
return;
|
||||
}
|
||||
|
||||
/* Disable counters globally */
|
||||
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
|
||||
ack_APIC_irq();
|
||||
|
||||
cpuc = &per_cpu(cpu_hw_counters, cpu);
|
||||
|
||||
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
|
||||
if (!status)
|
||||
goto out;
|
||||
|
||||
again:
|
||||
ack = status;
|
||||
for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
|
||||
@@ -440,7 +438,7 @@ again:
|
||||
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
|
||||
if (status)
|
||||
goto again;
|
||||
|
||||
out:
|
||||
/*
|
||||
* Do not reenable when global enable is off:
|
||||
*/
|
||||
|
Loading…
Reference in New Issue
Block a user