perf parse-events: Remove now unused hybrid logic

The event parser no longer needs to recurse when it encounters a legacy
cache event on a PMU; the necessary wildcard logic has moved to
perf_pmu__supports_legacy_cache and
perf_pmu__supports_wildcard_numeric.
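
As a rough illustration of the new shape of the check, the parser can now
ask each PMU directly whether it takes a legacy event instead of recursing
per hybrid PMU. This is a minimal sketch, not the actual tools/perf code:
the struct layout, the is_core field and the stand-in helper bodies are
assumptions; only the helper names come from this change.

/* Sketch only: the real helpers live in tools/perf/util/pmu.c. */
#include <stdbool.h>
#include <stdio.h>
#include <linux/perf_event.h>		/* PERF_TYPE_HW_CACHE */

struct perf_pmu { const char *name; bool is_core; };	/* assumed, simplified */

/* Stand-ins for the helpers named above; the real implementations differ. */
static bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu)
{
	return pmu->is_core;
}

static bool perf_pmu__supports_wildcard_numeric(const struct perf_pmu *pmu)
{
	return pmu->is_core;
}

/* The parser queries the PMU rather than recursing per hybrid PMU. */
static bool pmu_takes_legacy_event(const struct perf_pmu *pmu, unsigned int type)
{
	if (type == PERF_TYPE_HW_CACHE)
		return perf_pmu__supports_legacy_cache(pmu);
	return perf_pmu__supports_wildcard_numeric(pmu);
}

int main(void)
{
	struct perf_pmu pmus[] = {
		{ .name = "cpu_core",   .is_core = true  },
		{ .name = "uncore_imc", .is_core = false },
	};

	for (size_t i = 0; i < sizeof(pmus) / sizeof(pmus[0]); i++)
		printf("%s: %s\n", pmus[i].name,
		       pmu_takes_legacy_event(&pmus[i], PERF_TYPE_HW_CACHE) ?
		       "wildcarded" : "skipped");
	return 0;
}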

Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahmad Yasin <ahmad.yasin@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: Edward Baker <edward.baker@intel.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Samantha Alt <samantha.alt@intel.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Sumanth Korikkar <sumanthk@linux.ibm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Tiezhu Yang <yangtiezhu@loongson.cn>
Cc: Weilin Wang <weilin.wang@intel.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: Yang Jihong <yangjihong1@huawei.com>
Link: https://lore.kernel.org/r/20230502223851.2234828-28-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 996e54bbee (parent 8bc75f699c)
Author: Ian Rogers <irogers@google.com>
Date:   2023-05-02 15:38:34 -07:00
Committer: Arnaldo Carvalho de Melo <acme@redhat.com>
5 changed files, 0 insertions(+), 282 deletions(-)

diff --git a/tools/perf/util/Build b/tools/perf/util/Build

@@ -24,7 +24,6 @@ perf-y += llvm-utils.o
perf-y += mmap.o
perf-y += memswap.o
perf-y += parse-events.o
perf-y += parse-events-hybrid.o
perf-y += print-events.o
perf-y += tracepoint.o
perf-y += perf_regs.o

diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c

@@ -1,181 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/param.h>
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "parse-events-hybrid.h"
#include "debug.h"
#include "pmu.h"
#include "pmu-hybrid.h"
#include "perf.h"
static void config_hybrid_attr(struct perf_event_attr *attr,
int type, int pmu_type)
{
/*
* attr.config layout for type PERF_TYPE_HARDWARE and
* PERF_TYPE_HW_CACHE
*
* PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
* AA: hardware event ID
* EEEEEEEE: PMU type ID
* PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
* BB: hardware cache ID
* CC: hardware cache op ID
* DD: hardware cache op result ID
* EEEEEEEE: PMU type ID
* If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
*/
attr->type = type;
attr->config = (attr->config & PERF_HW_EVENT_MASK) |
((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
}
static int create_event_hybrid(__u32 config_type, int *idx,
struct list_head *list,
struct perf_event_attr *attr, const char *name,
const char *metric_id,
struct list_head *config_terms,
struct perf_pmu *pmu)
{
struct evsel *evsel;
__u32 type = attr->type;
__u64 config = attr->config;
config_hybrid_attr(attr, config_type, pmu->type);
/*
* Some hybrid hardware cache events are only available on one CPU
* PMU. For example, the 'L1-dcache-load-misses' is only available
* on cpu_core, while the 'L1-icache-loads' is only available on
* cpu_atom. We need to remove "not supported" hybrid cache events.
*/
if (attr->type == PERF_TYPE_HW_CACHE
&& !is_event_supported(attr->type, attr->config))
return 0;
evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id,
pmu, config_terms);
if (evsel) {
evsel->pmu_name = strdup(pmu->name);
if (!evsel->pmu_name)
return -ENOMEM;
} else
return -ENOMEM;
attr->type = type;
attr->config = config;
return 0;
}
static int pmu_cmp(struct parse_events_state *parse_state,
struct perf_pmu *pmu)
{
if (parse_state->evlist && parse_state->evlist->hybrid_pmu_name)
return strcmp(parse_state->evlist->hybrid_pmu_name, pmu->name);
if (parse_state->hybrid_pmu_name)
return strcmp(parse_state->hybrid_pmu_name, pmu->name);
return 0;
}
static int add_hw_hybrid(struct parse_events_state *parse_state,
struct list_head *list, struct perf_event_attr *attr,
const char *name, const char *metric_id,
struct list_head *config_terms)
{
struct perf_pmu *pmu;
int ret;
perf_pmu__for_each_hybrid_pmu(pmu) {
LIST_HEAD(terms);
if (pmu_cmp(parse_state, pmu))
continue;
copy_config_terms(&terms, config_terms);
ret = create_event_hybrid(PERF_TYPE_HARDWARE,
&parse_state->idx, list, attr, name,
metric_id, &terms, pmu);
free_config_terms(&terms);
if (ret)
return ret;
}
return 0;
}
static int create_raw_event_hybrid(int *idx, struct list_head *list,
struct perf_event_attr *attr,
const char *name,
const char *metric_id,
struct list_head *config_terms,
struct perf_pmu *pmu)
{
struct evsel *evsel;
attr->type = pmu->type;
evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id,
pmu, config_terms);
if (evsel)
evsel->pmu_name = strdup(pmu->name);
else
return -ENOMEM;
return 0;
}
static int add_raw_hybrid(struct parse_events_state *parse_state,
struct list_head *list, struct perf_event_attr *attr,
const char *name, const char *metric_id,
struct list_head *config_terms)
{
struct perf_pmu *pmu;
int ret;
perf_pmu__for_each_hybrid_pmu(pmu) {
LIST_HEAD(terms);
if (pmu_cmp(parse_state, pmu))
continue;
copy_config_terms(&terms, config_terms);
ret = create_raw_event_hybrid(&parse_state->idx, list, attr,
name, metric_id, &terms, pmu);
free_config_terms(&terms);
if (ret)
return ret;
}
return 0;
}
int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_event_attr *attr,
const char *name, const char *metric_id,
struct list_head *config_terms,
bool *hybrid)
{
*hybrid = false;
if (attr->type == PERF_TYPE_SOFTWARE)
return 0;
if (!perf_pmu__has_hybrid())
return 0;
*hybrid = true;
if (attr->type != PERF_TYPE_RAW) {
return add_hw_hybrid(parse_state, list, attr, name, metric_id,
config_terms);
}
return add_raw_hybrid(parse_state, list, attr, name, metric_id,
config_terms);
}
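
For reference, the attr.config layout described in the comment in the
removed config_hybrid_attr() above can be checked with a small standalone
program. A sketch under stated assumptions: the cpu_atom PMU type value of
8 is hypothetical (the real value is read from
/sys/bus/event_source/devices/cpu_atom/type), and it requires a uapi
perf_event.h recent enough to define PERF_PMU_TYPE_SHIFT and
PERF_HW_EVENT_MASK.

/* Worked example: high 32 bits carry the PMU type ID, low bits the
 * legacy hardware event ID. */
#include <stdio.h>
#include <inttypes.h>
#include <linux/perf_event.h>

int main(void)
{
	uint64_t pmu_type = 8;				/* hypothetical cpu_atom PMU type ID */
	uint64_t config = PERF_COUNT_HW_INSTRUCTIONS;	/* legacy hardware event ID 0x1 */

	config = (config & PERF_HW_EVENT_MASK) |
		 (pmu_type << PERF_PMU_TYPE_SHIFT);
	printf("attr.config = %#" PRIx64 "\n", config);	/* prints 0x800000001 */
	return 0;
}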

diff --git a/tools/perf/util/parse-events-hybrid.h b/tools/perf/util/parse-events-hybrid.h

@@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_PARSE_EVENTS_HYBRID_H
#define __PERF_PARSE_EVENTS_HYBRID_H
#include <linux/list.h>
#include <stdbool.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <string.h>
int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_event_attr *attr,
const char *name, const char *metric_id,
struct list_head *config_terms,
bool *hybrid);
#endif /* __PERF_PARSE_EVENTS_HYBRID_H */

diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c

@@ -25,7 +25,6 @@
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/pmu-hybrid.h"
#include "util/bpf-filter.h"
#include "util/util.h"
#include "tracepoint.h"
@@ -39,9 +38,6 @@ extern int parse_events_debug;
int parse_events_parse(void *parse_state, void *scanner);
static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused);
static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
const char *str, char *pmu_name,
struct list_head *list);
struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = {
@@ -1526,33 +1522,6 @@ static bool config_term_percore(struct list_head *config_terms)
return false;
}
static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state,
struct list_head *list, char *name,
struct list_head *head_config)
{
struct parse_events_term *term;
int ret = -1;
if (parse_state->fake_pmu || !head_config || list_empty(head_config) ||
!perf_pmu__is_hybrid(name)) {
return -1;
}
/*
* More than one term in list.
*/
if (head_config->next && head_config->next->next != head_config)
return -1;
term = list_first_entry(head_config, struct parse_events_term, list);
if (term && term->config && strcmp(term->config, "event")) {
ret = parse_events__with_hybrid_pmu(parse_state, term->config,
name, list);
}
return ret;
}
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, char *name,
struct list_head *head_config,
@@ -1642,11 +1611,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
return -ENOMEM;
if (!parse_events__inside_hybrid_pmu(parse_state, list, name,
head_config)) {
return 0;
}
if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
free_config_terms(&config_terms);
return -EINVAL;
@@ -2023,32 +1987,6 @@ int parse_events_terms(struct list_head *terms, const char *str)
return ret;
}
static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
const char *str, char *pmu_name,
struct list_head *list)
{
struct parse_events_state ps = {
.list = LIST_HEAD_INIT(ps.list),
.stoken = PE_START_EVENTS,
.hybrid_pmu_name = pmu_name,
.idx = parse_state->idx,
};
int ret;
ret = parse_events__scanner(str, &ps);
if (!ret) {
if (!list_empty(&ps.list)) {
list_splice(&ps.list, list);
parse_state->idx = ps.idx;
return 0;
} else
return -1;
}
return ret;
}
__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
/* Order by insertion index. */
@@ -2779,15 +2717,3 @@ char *parse_events_formats_error_string(char *additional_terms)
fail:
return NULL;
}
struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
struct perf_event_attr *attr,
const char *name,
const char *metric_id,
struct perf_pmu *pmu,
struct list_head *config_terms)
{
return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
pmu, config_terms, /*auto_merge_stats=*/false,
/*cpu_list=*/NULL);
}

diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h

@@ -122,7 +122,6 @@ struct parse_events_state {
struct list_head *terms;
int stoken;
struct perf_pmu *fake_pmu;
char *hybrid_pmu_name;
/* Should PE_LEGACY_NAME tokens be generated for config terms? */
bool match_legacy_cache_terms;
bool wild_card_pmus;
@@ -235,11 +234,4 @@ static inline bool is_sdt_event(char *str __maybe_unused)
}
#endif /* HAVE_LIBELF_SUPPORT */
struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
struct perf_event_attr *attr,
const char *name,
const char *metric_id,
struct perf_pmu *pmu,
struct list_head *config_terms);
#endif /* __PERF_PARSE_EVENTS_H */