perf/x86: Hybrid PMU support for unconstrained

The unconstrained value depends on the number of GP and fixed counters.
Each hybrid PMU should use its own unconstrained.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1618237865-33448-8-git-send-email-kan.liang@linux.intel.com
This commit is contained in:
Kan Liang 2021-04-12 07:30:47 -07:00 committed by Peter Zijlstra
parent d4b294bf84
commit eaacf07d11
2 changed files with 12 additions and 1 deletions

View File

@@ -3147,7 +3147,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
}
}
-	return &unconstrained;
+	return &hybrid_var(cpuc->pmu, unconstrained);
}
static struct event_constraint *

View File

@@ -638,6 +638,7 @@ struct x86_hybrid_pmu {
int max_pebs_events;
int num_counters;
int num_counters_fixed;
+	struct event_constraint		unconstrained;
};
static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -658,6 +659,16 @@ extern struct static_key_false perf_is_hybrid;
__Fp; \
}))
/*
 * hybrid_var() - resolve the per-PMU copy of a global variable.
 *
 * Expands to an lvalue (note the outer dereference of the statement
 * expression): when is_hybrid() is true and _pmu is non-NULL, it refers
 * to the hybrid PMU's own copy, hybrid_pmu(_pmu)->_var; otherwise it
 * falls back to the global _var. Being an lvalue, it can appear on
 * either side of an assignment.
 *
 * NOTE(review): relies on the GCC/Clang statement-expression and typeof
 * extensions, consistent with the hybrid_*() helpers above.
 */
#define hybrid_var(_pmu, _var) \
(*({ \
typeof(&_var) __Fp = &_var; \
\
if (is_hybrid() && (_pmu)) \
__Fp = &hybrid_pmu(_pmu)->_var; \
\
__Fp; \
}))
/*
* struct x86_pmu - generic x86 pmu
*/