perf/x86: Hybrid PMU support for unconstrained
The unconstrained value depends on the number of GP and fixed counters.
Each hybrid PMU should use its own unconstrained.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1618237865-33448-8-git-send-email-kan.liang@linux.intel.com
commit eaacf07d11
parent d4b294bf84
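The hunks below add the per-PMU storage for the constraint and a helper to look it up; the per-PMU initialization itself lives in the hybrid setup code and is not part of this excerpt. As a rough sketch only (pmu->num_counters and __EVENT_CONSTRAINT are taken from the surrounding perf code, not from this diff), each hybrid PMU would build its own "any GP counter" constraint from its own counter count, the same way the global unconstrained is built from x86_pmu.num_counters:

	/*
	 * Sketch, not part of this diff: build a hybrid PMU's own
	 * unconstrained from that PMU's general-purpose counter count.
	 */
	pmu->unconstrained = (struct event_constraint)
			     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
						0, pmu->num_counters, 0, 0);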
@@ -3147,7 +3147,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 		}
 	}
 
-	return &unconstrained;
+	return &hybrid_var(cpuc->pmu, unconstrained);
 }
 
 static struct event_constraint *
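Per the hybrid_var() definition added further down, the new return statement is roughly equivalent to the following open-coded form (a sketch for illustration, not code from the patch). On a non-hybrid system, or when no PMU pointer is set, it still resolves to the global unconstrained, so behaviour there is unchanged:

	/* Open-coded sketch of what the macro expands to at this call site. */
	struct event_constraint *c = &unconstrained;

	if (is_hybrid() && cpuc->pmu)
		c = &hybrid_pmu(cpuc->pmu)->unconstrained;

	return c;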
@@ -638,6 +638,7 @@ struct x86_hybrid_pmu {
 	int				max_pebs_events;
 	int				num_counters;
 	int				num_counters_fixed;
+	struct event_constraint		unconstrained;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -658,6 +659,16 @@ extern struct static_key_false perf_is_hybrid;
 	__Fp;						\
 }))
 
+#define hybrid_var(_pmu, _var)				\
+(*({							\
+	typeof(&_var) __Fp = &_var;			\
+							\
+	if (is_hybrid() && (_pmu))			\
+		__Fp = &hybrid_pmu(_pmu)->_var;		\
+							\
+	__Fp;						\
+}))
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
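The (*({ ... })) wrapper is worth noting: the statement expression yields a pointer, and the outer dereference turns the whole macro into an lvalue, so the same hybrid_var() expression can be used both to read and to assign the per-PMU field. A minimal userspace sketch of the same pattern (assumed example code using GNU C extensions, not kernel code; the alternative pointer is passed directly instead of going through hybrid_pmu()):

#include <stdio.h>
#include <stdbool.h>

struct caps { int num_counters; };

static struct caps global_caps  = { .num_counters = 4 };
static struct caps bigcore_caps = { .num_counters = 8 };
static bool hybrid = true;

/* Same shape as hybrid_var(): pick a pointer, then dereference it. */
#define caps_var(percpu, var)				\
(*({							\
	typeof(&var) __p = &var;			\
							\
	if (hybrid && (percpu))				\
		__p = (percpu);				\
							\
	__p;						\
}))

int main(void)
{
	/* Reads and writes both go through the same expression. */
	caps_var(&bigcore_caps, global_caps).num_counters = 10;

	printf("bigcore: %d\n", bigcore_caps.num_counters);	/* 10 */
	printf("global:  %d\n", global_caps.num_counters);	/* 4, untouched */
	return 0;
}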