perf, x86: implement ARCH_PERFMON_EVENTSEL bit masks
ARCH_PERFMON_EVENTSEL bit masks are often used in the kernel. This patch
adds macros for the bit masks and removes the local defines. The function
intel_pmu_raw_event() becomes x86_pmu_raw_event(), which is generic for
x86 models; the same applies to p6. Duplicate code is removed.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100330092821.GH11907@erda.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit a098f4484b (parent 948b1bb89a)
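For orientation, a minimal stand-alone sketch (illustration only, not part of the patch) of how the ARCH_PERFMON_EVENTSEL_* masks introduced below compose and decode an event-select value. The constants mirror the definitions added by this patch; the program itself is hypothetical.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the definitions added by this patch (illustration only). */
#define ARCH_PERFMON_EVENTSEL_EVENT   0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK   0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR     (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS      (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_INT     (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ENABLE  (1ULL << 22)

int main(void)
{
	/* UNHALTED_CORE_CYCLES: event code 0x3c, unit mask 0x00, counted in
	 * user and kernel mode, counter and its interrupt enabled. */
	uint64_t config = 0x3cULL | (0x00ULL << 8) |
			  ARCH_PERFMON_EVENTSEL_USR |
			  ARCH_PERFMON_EVENTSEL_OS  |
			  ARCH_PERFMON_EVENTSEL_INT |
			  ARCH_PERFMON_EVENTSEL_ENABLE;

	printf("eventsel = 0x%llx, event = 0x%llx, umask = 0x%llx\n",
	       (unsigned long long)config,
	       (unsigned long long)(config & ARCH_PERFMON_EVENTSEL_EVENT),
	       (unsigned long long)((config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8));
	return 0;
}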
@@ -18,39 +18,31 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0                  0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1                  0x187
 
-#define ARCH_PERFMON_EVENTSEL_ENABLE                (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_ANY                   (1 << 21)
-#define ARCH_PERFMON_EVENTSEL_INT                   (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS                    (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR                   (1 << 16)
-
-/*
- * Includes eventsel and unit mask as well:
- */
-
-
-#define INTEL_ARCH_EVTSEL_MASK		0x000000FFULL
-#define INTEL_ARCH_UNIT_MASK		0x0000FF00ULL
-#define INTEL_ARCH_EDGE_MASK		0x00040000ULL
-#define INTEL_ARCH_INV_MASK		0x00800000ULL
-#define INTEL_ARCH_CNT_MASK		0xFF000000ULL
-#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
-
-/*
- * filter mask to validate fixed counter events.
- * the following filters disqualify for fixed counters:
- *  - inv
- *  - edge
- *  - cnt-mask
- *  The other filters are supported by fixed counters.
- *  The any-thread option is supported starting with v3.
- */
-#define INTEL_ARCH_FIXED_MASK \
-	(INTEL_ARCH_CNT_MASK| \
-	 INTEL_ARCH_INV_MASK| \
-	 INTEL_ARCH_EDGE_MASK|\
-	 INTEL_ARCH_UNIT_MASK|\
-	 INTEL_ARCH_EVENT_MASK)
+#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
+#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
+#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
+#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
+#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
+#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
+#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
+#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
+
+#define AMD64_EVENTSEL_EVENT	\
+	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+#define INTEL_ARCH_EVENT_MASK	\
+	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+
+#define X86_RAW_EVENT_MASK		\
+	(ARCH_PERFMON_EVENTSEL_EVENT |	\
+	 ARCH_PERFMON_EVENTSEL_UMASK |	\
+	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
+	 ARCH_PERFMON_EVENTSEL_INV   |	\
+	 ARCH_PERFMON_EVENTSEL_CMASK)
+#define AMD64_RAW_EVENT_MASK		\
+	(X86_RAW_EVENT_MASK          |  \
+	 AMD64_EVENTSEL_EVENT)
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
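As a side note (illustration only, not part of the patch): the composite masks above evaluate to X86_RAW_EVENT_MASK = 0xff84ffff, and AMD64_RAW_EVENT_MASK additionally keeps the AMD64 extended event-select bits 35:32. A small stand-alone sketch that checks this:

#include <stdio.h>

/* Copies of the composite masks from the hunk above (illustration only). */
#define ARCH_PERFMON_EVENTSEL_EVENT   0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK   0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_EDGE    (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_INV     (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK   0xFF000000ULL

#define AMD64_EVENTSEL_EVENT \
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))

#define X86_RAW_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_EVENT | \
	 ARCH_PERFMON_EVENTSEL_UMASK | \
	 ARCH_PERFMON_EVENTSEL_EDGE  | \
	 ARCH_PERFMON_EVENTSEL_INV   | \
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK \
	(X86_RAW_EVENT_MASK | AMD64_EVENTSEL_EVENT)

int main(void)
{
	printf("X86_RAW_EVENT_MASK   = 0x%llx\n",   /* 0xff84ffff  */
	       (unsigned long long)X86_RAW_EVENT_MASK);
	printf("AMD64_RAW_EVENT_MASK = 0x%llx\n",   /* 0xfff84ffff */
	       (unsigned long long)AMD64_RAW_EVENT_MASK);
	return 0;
}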
@@ -143,13 +143,21 @@ struct cpu_hw_events {
  * Constraint on the Event code.
  */
 #define INTEL_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
+	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
 
 /*
  * Constraint on the Event code + UMask + fixed-mask
+ *
+ * filter mask to validate fixed counter events.
+ * the following filters disqualify for fixed counters:
+ *  - inv
+ *  - edge
+ *  - cnt-mask
+ *  The other filters are supported by fixed counters.
+ *  The any-thread option is supported starting with v3.
  */
 #define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
+	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
 
 /*
  * Constraint on the Event code + UMask
@@ -437,6 +445,11 @@ static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc
 	return 0;
 }
 
+static u64 x86_pmu_raw_event(u64 hw_event)
+{
+	return hw_event & X86_RAW_EVENT_MASK;
+}
+
 /*
  * Setup the hardware configuration for a given attr_type
  */
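A hypothetical illustration (not from the patch) of what this new generic helper does: a raw hardware event config supplied by user space is filtered so that only the event, unit-mask, edge, inv and cmask fields survive, while bits such as USR, OS, ANY and ENABLE are stripped. The sample value 0x0063003c is made up for the example.

#include <stdio.h>
#include <stdint.h>

#define X86_RAW_EVENT_MASK 0xff84ffffULL	/* value of the kernel macro */

/* Stand-alone copy of the helper added above. */
static uint64_t x86_pmu_raw_event(uint64_t hw_event)
{
	return hw_event & X86_RAW_EVENT_MASK;
}

int main(void)
{
	/* 0x0063003c sets event 0x3c plus USR, OS, ANY and ENABLE;
	 * masking leaves only the event code 0x3c. */
	printf("0x%llx\n", (unsigned long long)x86_pmu_raw_event(0x0063003cULL));
	return 0;
}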
@@ -1427,7 +1440,7 @@ void __init init_hw_perf_events(void)
 
 	if (x86_pmu.event_constraints) {
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if (c->cmask != INTEL_ARCH_FIXED_MASK)
+			if (c->cmask != X86_RAW_EVENT_MASK)
 				continue;
 
 			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
@@ -113,20 +113,7 @@ static u64 amd_pmu_event_map(int hw_event)
 
 static u64 amd_pmu_raw_event(u64 hw_event)
 {
-#define K7_EVNTSEL_EVENT_MASK	0xF000000FFULL
-#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
-#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
-#define K7_EVNTSEL_INV_MASK	0x000800000ULL
-#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL
-
-#define K7_EVNTSEL_MASK			\
-	(K7_EVNTSEL_EVENT_MASK |	\
-	 K7_EVNTSEL_UNIT_MASK  |	\
-	 K7_EVNTSEL_EDGE_MASK  |	\
-	 K7_EVNTSEL_INV_MASK   |	\
-	 K7_EVNTSEL_REG_MASK)
-
-	return hw_event & K7_EVNTSEL_MASK;
+	return hw_event & AMD64_RAW_EVENT_MASK;
 }
 
 /*
@@ -452,24 +452,6 @@ static __initconst u64 atom_hw_cache_event_ids
  },
 };
 
-static u64 intel_pmu_raw_event(u64 hw_event)
-{
-#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
-#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
-#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
-#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL
-
-#define CORE_EVNTSEL_MASK		\
-	(INTEL_ARCH_EVTSEL_MASK |	\
-	 INTEL_ARCH_UNIT_MASK   |	\
-	 INTEL_ARCH_EDGE_MASK   |	\
-	 INTEL_ARCH_INV_MASK    |	\
-	 INTEL_ARCH_CNT_MASK)
-
-	return hw_event & CORE_EVNTSEL_MASK;
-}
-
 static void intel_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -788,7 +770,7 @@ static __initconst struct x86_pmu core_pmu = {
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= intel_pmu_raw_event,
+	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
@@ -827,7 +809,7 @@ static __initconst struct x86_pmu intel_pmu = {
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= intel_pmu_raw_event,
+	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
@@ -27,24 +27,6 @@ static u64 p6_pmu_event_map(int hw_event)
  */
 #define P6_NOP_EVENT			0x0000002EULL
 
-static u64 p6_pmu_raw_event(u64 hw_event)
-{
-#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
-#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
-#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
-#define P6_EVNTSEL_INV_MASK		0x00800000ULL
-#define P6_EVNTSEL_REG_MASK		0xFF000000ULL
-
-#define P6_EVNTSEL_MASK			\
-	(P6_EVNTSEL_EVENT_MASK |	\
-	 P6_EVNTSEL_UNIT_MASK  |	\
-	 P6_EVNTSEL_EDGE_MASK  |	\
-	 P6_EVNTSEL_INV_MASK   |	\
-	 P6_EVNTSEL_REG_MASK)
-
-	return hw_event & P6_EVNTSEL_MASK;
-}
-
 static struct event_constraint p6_event_constraints[] =
 {
 	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
@@ -114,7 +96,7 @@ static __initconst struct x86_pmu p6_pmu = {
 	.eventsel		= MSR_P6_EVNTSEL0,
 	.perfctr		= MSR_P6_PERFCTR0,
 	.event_map		= p6_pmu_event_map,
-	.raw_event		= p6_pmu_raw_event,
+	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
 	.apic			= 1,
 	.max_period		= (1ULL << 31) - 1,