perf/x86: Prettify pmu config literals
I got somewhat tired of having to decode hex numbers. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Thomas Gleixner <tglx@linutronix.de> Cc: Stephane Eranian <eranian@google.com> Cc: Robert Richter <robert.richter@amd.com> Link: http://lkml.kernel.org/n/tip-0vsy1sgywc4uar3mu1szm0rg@git.kernel.org Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
35239e23c6
commit
f9b4eeb809
@ -268,6 +268,29 @@ struct x86_pmu_quirk {
|
||||
void (*func)(void);
|
||||
};
|
||||
|
||||
/*
 * Decoded view of an x86 PMU event-select register value.
 *
 * The bit-field order mirrors the hardware register layout from bit 0
 * upward (event select, unit mask, USR/OS enables, edge detect, pin
 * control, interrupt enable, counter enable, invert, counter mask,
 * plus the AMD extended event-select and guest/host-only bits).
 * NOTE(review): relies on the compiler allocating bit-fields from the
 * low-order end, as GCC does on little-endian x86 — this is kernel
 * code and is not portable beyond that assumption.
 */
union x86_pmu_config {
	struct {
		u64 event:8,		/* event select code		*/
		    umask:8,		/* unit mask			*/
		    usr:1,		/* count in user mode		*/
		    os:1,		/* count in kernel mode		*/
		    edge:1,		/* edge detect			*/
		    pc:1,		/* pin control			*/
		    interrupt:1,	/* APIC interrupt enable	*/
		    __reserved1:1,
		    en:1,		/* counter enable		*/
		    inv:1,		/* invert counter-mask compare	*/
		    cmask:8,		/* counter mask threshold	*/
		    event2:4,		/* extended event select (AMD)	*/
		    __reserved2:4,
		    go:1,		/* guest-only counting (AMD)	*/
		    ho:1;		/* host-only counting (AMD)	*/
	} bits;
	u64 value;		/* raw 64-bit register value */
};

/*
 * Build a raw config value from named fields, e.g.:
 *	X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)
 * instead of the opaque literal 0x108000c0.
 */
#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
|
||||
|
||||
/*
|
||||
* struct x86_pmu - generic x86 pmu
|
||||
*/
|
||||
|
@ -1288,7 +1288,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
|
||||
*
|
||||
* Thereby we gain a PEBS capable cycle counter.
|
||||
*/
|
||||
u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
|
||||
u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
|
||||
|
||||
|
||||
alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
|
||||
event->hw.config = alt_config;
|
||||
@ -1690,9 +1691,11 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.extra_regs = intel_nehalem_extra_regs;
|
||||
|
||||
/* UOPS_ISSUED.STALLED_CYCLES */
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
|
||||
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
|
||||
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
|
||||
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
|
||||
|
||||
x86_add_quirk(intel_nehalem_quirk);
|
||||
|
||||
@ -1727,9 +1730,11 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.er_flags |= ERF_HAS_RSP_1;
|
||||
|
||||
/* UOPS_ISSUED.STALLED_CYCLES */
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
|
||||
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
|
||||
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
|
||||
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
|
||||
|
||||
pr_cont("Westmere events, ");
|
||||
break;
|
||||
@ -1750,9 +1755,11 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
|
||||
|
||||
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
|
||||
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
|
||||
/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;
|
||||
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
|
||||
X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
|
||||
|
||||
pr_cont("SandyBridge events, ");
|
||||
break;
|
||||
|
Loading…
x
Reference in New Issue
Block a user