99fc5941b8
A config terms list was spliced twice, resulting in a never-ending loop
when the list was traversed. Fix by using list_splice_init() and by
copying and freeing the lists as necessary.

This patch depends on the patch "perf tools: Factor out
copy_config_terms() and free_config_terms()".

Example on ADL:

Before:

  # perf record -e '{intel_pt//,cycles/aux-sample-size=4096/pp}' uname &
  # jobs
  [1]+  Running                 perf record -e "{intel_pt//,cycles/aux-sample-size=4096/pp}" uname
  # perf top -E 10
  PerfTop:    4071 irqs/sec  kernel: 6.9%  exact: 100.0% lost: 0/0 drop: 0/0 [4000Hz cycles],  (all, 24 CPUs)
  ----------------------------------------------------------------------------------------------------------
     97.60%  perf      [.] __evsel__get_config_term
      0.25%  [kernel]  [k] kallsyms_expand_symbol.constprop.13
      0.24%  perf      [.] kallsyms__parse
      0.15%  [kernel]  [k] _raw_spin_lock
      0.14%  [kernel]  [k] number
      0.13%  [kernel]  [k] advance_transaction
      0.08%  [kernel]  [k] format_decode
      0.08%  perf      [.] map__process_kallsym_symbol
      0.08%  perf      [.] rb_insert_color
      0.08%  [kernel]  [k] vsnprintf
  exiting.
  # kill %1

After:

  # perf record -e '{intel_pt//,cycles/aux-sample-size=4096/pp}' uname &
  Linux
  [ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 0.060 MB perf.data ]
  # perf script | head
  perf-exec  604 [001]  1827.312293:  psb:  psb offs: 0  ffffffffb8415e87 pt_config_start+0x37 ([kernel.kallsyms])
  perf-exec  604        1827.312293:  1  branches:  ffffffffb856a3bd event_sched_in.isra.133+0xfd ([kernel.kallsyms]) => ffffffffb856a9a0 perf_pmu_nop_void+0x0 ([kernel.kallsyms])
  perf-exec  604        1827.312293:  1  branches:  ffffffffb856b10e merge_sched_in+0x26e ([kernel.kallsyms]) => ffffffffb856a2c0 event_sched_in.isra.133+0x0 ([kernel.kallsyms])
  perf-exec  604        1827.312293:  1  branches:  ffffffffb856a45d event_sched_in.isra.133+0x19d ([kernel.kallsyms]) => ffffffffb8568b80 perf_event_set_state.part.61+0x0 ([kernel.kallsyms])
  perf-exec  604        1827.312293:  1  branches:  ffffffffb8568b86 perf_event_set_state.part.61+0x6 ([kernel.kallsyms]) => ffffffffb85662a0 perf_event_update_time+0x0 ([kernel.kallsyms])
  perf-exec  604        1827.312293:  1  branches:  ffffffffb856a35c event_sched_in.isra.133+0x9c ([kernel.kallsyms]) => ffffffffb8567610 perf_log_itrace_start+0x0 ([kernel.kallsyms])
  perf-exec  604        1827.312293:  1  branches:  ffffffffb856a377 event_sched_in.isra.133+0xb7 ([kernel.kallsyms]) => ffffffffb8403b40 x86_pmu_add+0x0 ([kernel.kallsyms])
  perf-exec  604        1827.312293:  1  branches:  ffffffffb8403b86 x86_pmu_add+0x46 ([kernel.kallsyms]) => ffffffffb8403940 collect_events+0x0 ([kernel.kallsyms])
  perf-exec  604        1827.312293:  1  branches:  ffffffffb8403a7b collect_events+0x13b ([kernel.kallsyms]) => ffffffffb8402cd0 collect_event+0x0 ([kernel.kallsyms])

Fixes: 30def61f64 ("perf parse-events: Create two hybrid cache events")
Fixes: 94da591b1c ("perf parse-events: Create two hybrid raw events")
Fixes: 9cbfa2f64c ("perf parse-events: Create two hybrid hardware events")
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20210909125508.28693-3-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/param.h>
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "parse-events-hybrid.h"
#include "debug.h"
#include "pmu.h"
#include "pmu-hybrid.h"
#include "perf.h"

static void config_hybrid_attr(struct perf_event_attr *attr,
			       int type, int pmu_type)
{
	/*
	 * attr.config layout for type PERF_TYPE_HARDWARE and
	 * PERF_TYPE_HW_CACHE
	 *
	 * PERF_TYPE_HARDWARE:  0xEEEEEEEE000000AA
	 *                      AA: hardware event ID
	 *                      EEEEEEEE: PMU type ID
	 * PERF_TYPE_HW_CACHE:  0xEEEEEEEE00DDCCBB
	 *                      BB: hardware cache ID
	 *                      CC: hardware cache op ID
	 *                      DD: hardware cache op result ID
	 *                      EEEEEEEE: PMU type ID
	 * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
	 */
	attr->type = type;
	attr->config = attr->config | ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
}

static int create_event_hybrid(__u32 config_type, int *idx,
			       struct list_head *list,
			       struct perf_event_attr *attr, char *name,
			       struct list_head *config_terms,
			       struct perf_pmu *pmu)
{
	struct evsel *evsel;
	__u32 type = attr->type;
	__u64 config = attr->config;

	config_hybrid_attr(attr, config_type, pmu->type);
	evsel = parse_events__add_event_hybrid(list, idx, attr, name,
					       pmu, config_terms);
	if (evsel)
		evsel->pmu_name = strdup(pmu->name);
	else
		return -ENOMEM;

	attr->type = type;
	attr->config = config;
	return 0;
}

static int pmu_cmp(struct parse_events_state *parse_state,
		   struct perf_pmu *pmu)
{
	if (!parse_state->hybrid_pmu_name)
		return 0;

	return strcmp(parse_state->hybrid_pmu_name, pmu->name);
}

static int add_hw_hybrid(struct parse_events_state *parse_state,
			 struct list_head *list, struct perf_event_attr *attr,
			 char *name, struct list_head *config_terms)
{
	struct perf_pmu *pmu;
	int ret;

	perf_pmu__for_each_hybrid_pmu(pmu) {
		LIST_HEAD(terms);

		if (pmu_cmp(parse_state, pmu))
			continue;

		copy_config_terms(&terms, config_terms);
		ret = create_event_hybrid(PERF_TYPE_HARDWARE,
					  &parse_state->idx, list, attr, name,
					  &terms, pmu);
		free_config_terms(&terms);
		if (ret)
			return ret;
	}

	return 0;
}

static int create_raw_event_hybrid(int *idx, struct list_head *list,
				   struct perf_event_attr *attr, char *name,
				   struct list_head *config_terms,
				   struct perf_pmu *pmu)
{
	struct evsel *evsel;

	attr->type = pmu->type;
	evsel = parse_events__add_event_hybrid(list, idx, attr, name,
					       pmu, config_terms);
	if (evsel)
		evsel->pmu_name = strdup(pmu->name);
	else
		return -ENOMEM;

	return 0;
}

static int add_raw_hybrid(struct parse_events_state *parse_state,
			  struct list_head *list, struct perf_event_attr *attr,
			  char *name, struct list_head *config_terms)
{
	struct perf_pmu *pmu;
	int ret;

	perf_pmu__for_each_hybrid_pmu(pmu) {
		LIST_HEAD(terms);

		if (pmu_cmp(parse_state, pmu))
			continue;

		copy_config_terms(&terms, config_terms);
		ret = create_raw_event_hybrid(&parse_state->idx, list, attr,
					      name, &terms, pmu);
		free_config_terms(&terms);
		if (ret)
			return ret;
	}

	return 0;
}

int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
				     struct list_head *list,
				     struct perf_event_attr *attr,
				     char *name, struct list_head *config_terms,
				     bool *hybrid)
{
	*hybrid = false;
	if (attr->type == PERF_TYPE_SOFTWARE)
		return 0;

	if (!perf_pmu__has_hybrid())
		return 0;

	*hybrid = true;
	if (attr->type != PERF_TYPE_RAW) {
		return add_hw_hybrid(parse_state, list, attr, name,
				     config_terms);
	}

	return add_raw_hybrid(parse_state, list, attr, name,
			      config_terms);
}

int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
				   struct perf_event_attr *attr, char *name,
				   struct list_head *config_terms,
				   bool *hybrid,
				   struct parse_events_state *parse_state)
{
	struct perf_pmu *pmu;
	int ret;

	*hybrid = false;
	if (!perf_pmu__has_hybrid())
		return 0;

	*hybrid = true;
	perf_pmu__for_each_hybrid_pmu(pmu) {
		LIST_HEAD(terms);

		if (pmu_cmp(parse_state, pmu))
			continue;

		copy_config_terms(&terms, config_terms);
		ret = create_event_hybrid(PERF_TYPE_HW_CACHE, idx, list,
					  attr, name, &terms, pmu);
		free_config_terms(&terms);
		if (ret)
			return ret;
	}

	return 0;
}
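copy_config_terms() and free_config_terms() themselves live in evsel.c and come from the companion patch named in the commit message; they are not shown in this file. As a rough sketch of what they are expected to do (the struct and field names below are assumptions drawn from perf's evsel config-term code, not text quoted from that patch):

	/* Duplicate every term from 'src' onto 'dst', deep-copying owned strings. */
	void copy_config_terms(struct list_head *dst, struct list_head *src)
	{
		struct evsel_config_term *pos, *tmp;

		list_for_each_entry(pos, src, list) {
			tmp = malloc(sizeof(*tmp));
			if (tmp == NULL)
				break;

			*tmp = *pos;
			if (tmp->free_str) {
				tmp->val.str = strdup(pos->val.str);
				if (tmp->val.str == NULL) {
					free(tmp);
					break;
				}
			}
			list_add_tail(&tmp->list, dst);
		}
	}

	/* Unlink and free every term, including any string it owns. */
	void free_config_terms(struct list_head *config_terms)
	{
		struct evsel_config_term *term, *h;

		list_for_each_entry_safe(term, h, config_terms, list) {
			list_del_init(&term->list);
			if (term->free_str)
				zfree(&term->val.str);
			free(term);
		}
	}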