perf/x86: Hybrid PMU support for event constraints
The events are different among hybrid PMUs. Each hybrid PMU should use
its own event constraints.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-10-git-send-email-kan.liang@linux.intel.com
parent 0d18f2dfea
commit 24ee38ffe6
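The change leans on the hybrid() accessor introduced earlier in this series: it reads a field from the per-PMU struct x86_hybrid_pmu on hybrid systems and falls back to the global x86_pmu otherwise. Below is a minimal userspace model of that fallback pattern, not the kernel's implementation: the struct layouts are trimmed, is_hybrid_system stands in for is_hybrid(), and a plain cast stands in for the container_of()-based hybrid_pmu() helper.

#include <stdio.h>
#include <stdbool.h>

struct event_constraint {
	unsigned long long code;
	unsigned long long cmask;
	int weight;
};

struct pmu {
	int type;
};

struct x86_hybrid_pmu {
	struct pmu pmu;			/* must stay first for the cast below */
	struct event_constraint *event_constraints;
	struct event_constraint *pebs_constraints;
};

/* Global descriptor, as used on non-hybrid systems. */
static struct {
	struct event_constraint *event_constraints;
	struct event_constraint *pebs_constraints;
} x86_pmu;

static bool is_hybrid_system;	/* stand-in for the kernel's is_hybrid() */

/* Read _field from the per-PMU struct on hybrid systems, else from the
 * global x86_pmu. GNU C statement expression, as in the kernel macro. */
#define hybrid(_pmu, _field)						\
(*({									\
	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;			\
	if (is_hybrid_system && (_pmu))					\
		__Fp = &((struct x86_hybrid_pmu *)(_pmu))->_field;	\
	__Fp;								\
}))

int main(void)
{
	struct event_constraint big_table[] = {
		{ 0x00c0, 0xffff, 1 },
		{ 0, 0, 0 },		/* terminator */
	};
	struct x86_hybrid_pmu big = { .event_constraints = big_table };

	is_hybrid_system = true;
	struct event_constraint *c = hybrid(&big.pmu, event_constraints);
	printf("per-PMU table, first code: %#llx\n", c->code);

	/* With hybrid disabled, the same access falls back to x86_pmu. */
	is_hybrid_system = false;
	printf("global fallback: %p\n", (void *)hybrid(&big.pmu, event_constraints));
	return 0;
}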
arch/x86/events/core.c

@@ -1518,6 +1518,7 @@ void perf_event_print_debug(void)
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	int num_counters = hybrid(cpuc->pmu, num_counters);
 	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
+	struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
 	unsigned long flags;
 	int idx;
@@ -1537,7 +1538,7 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: status: %016llx\n", cpu, status);
 		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
 		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
-		if (x86_pmu.pebs_constraints) {
+		if (pebs_constraints) {
 			rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
 			pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
 		}
@@ -3136,10 +3136,11 @@ struct event_constraint *
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event)
 {
+	struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
 	struct event_constraint *c;
 
-	if (x86_pmu.event_constraints) {
-		for_each_event_constraint(c, x86_pmu.event_constraints) {
+	if (event_constraints) {
+		for_each_event_constraint(c, event_constraints) {
 			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
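For context, the lookup this hunk (and the PEBS one below) redirects scans a constraint table until a terminating entry and returns the first entry matching the event's config. The following is a simplified, self-contained model of that scan; the kernel's for_each_event_constraint() and constraint_match() in arch/x86/events also handle code ranges and extra flags, which this sketch omits.

#include <stdio.h>

struct event_constraint {
	unsigned long long code;	/* event code to match */
	unsigned long long cmask;	/* which config bits to compare */
	int weight;			/* 0 terminates the table */
};

/* Walk entries until the zero-weight terminator. */
#define for_each_event_constraint(c, table) \
	for ((c) = (table); (c)->weight; (c)++)

static int constraint_match(struct event_constraint *c,
			    unsigned long long config)
{
	return (config & c->cmask) == c->code;
}

static struct event_constraint table[] = {
	{ 0x00c0, 0xffff, 1 },	/* e.g. INST_RETIRED */
	{ 0x003c, 0xffff, 2 },	/* e.g. CPU_CLK_UNHALTED */
	{ 0, 0, 0 },		/* terminator */
};

int main(void)
{
	struct event_constraint *c;
	unsigned long long config = 0x00c0;

	for_each_event_constraint(c, table) {
		if (constraint_match(c, config)) {
			printf("matched code %#llx, weight %d\n",
			       c->code, c->weight);
			return 0;
		}
	}
	printf("no match\n");
	return 1;
}

With the patch applied, the table passed to this scan comes from the event's own PMU via hybrid(), so a big-core event and an atom-core event can resolve against different tables.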
arch/x86/events/intel/ds.c

@@ -959,13 +959,14 @@ struct event_constraint intel_spr_pebs_event_constraints[] = {
 
 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 {
+	struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
 	struct event_constraint *c;
 
 	if (!event->attr.precise_ip)
 		return NULL;
 
-	if (x86_pmu.pebs_constraints) {
-		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
+	if (pebs_constraints) {
+		for_each_event_constraint(c, pebs_constraints) {
 			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
arch/x86/events/perf_event.h

@@ -648,6 +648,8 @@ struct x86_hybrid_pmu {
 					  [PERF_COUNT_HW_CACHE_MAX]
 					  [PERF_COUNT_HW_CACHE_OP_MAX]
 					  [PERF_COUNT_HW_CACHE_RESULT_MAX];
+	struct event_constraint		*event_constraints;
+	struct event_constraint		*pebs_constraints;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
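The two new fields give each hybrid PMU its own tables to hang off. As an illustration only of how they would be populated: the table contents and the function name assign_hybrid_constraints() below are hypothetical, and in the kernel the actual wiring happens during PMU init in later patches of this series.

struct event_constraint {
	unsigned long long code;
	unsigned long long cmask;
	int weight;
};

struct x86_hybrid_pmu {
	struct event_constraint *event_constraints;
	struct event_constraint *pebs_constraints;
};

static struct event_constraint big_core_constraints[] = {
	{ 0x00c0, 0xffff, 1 },
	{ 0, 0, 0 },			/* terminator */
};

static struct event_constraint atom_constraints[] = {
	{ 0x003c, 0xffff, 1 },
	{ 0, 0, 0 },			/* terminator */
};

static void assign_hybrid_constraints(struct x86_hybrid_pmu *big,
				      struct x86_hybrid_pmu *atom)
{
	/* Each hybrid PMU points at its own tables; hybrid() then
	 * resolves through these instead of the global x86_pmu. */
	big->event_constraints  = big_core_constraints;
	atom->event_constraints = atom_constraints;
}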