Merge tag 'perf-core-for-mingo-4.14-20170728' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes for 4.14 from Arnaldo Carvalho de Melo:

New features:

 - Add PERF_SAMPLE_CALLCHAIN and PERF_RECORD_MMAP[2] to 'perf data' CTF
   conversion, allowing CTF trace visualization tools to show callchains
   and to resolve symbols (Geneviève Bastien)
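
   For context, a minimal sketch (illustrative, not from this commit) of
   the callchain payload that gets converted: the "perf_callchain_size"
   and "perf_callchain" names are the CTF fields added by the patch
   below, and the struct mirrors the ip_callchain layout used in
   tools/perf:

     #include <linux/types.h>

     /* Callchain data carried in a sample when PERF_SAMPLE_CALLCHAIN is
      * set: the converter writes 'nr' as "perf_callchain_size" and the
      * 'ips' entries as the "perf_callchain" sequence in the CTF stream,
      * while the newly converted MMAP/MMAP2 records let viewers resolve
      * those addresses to symbols. */
     struct ip_callchain {
             __u64 nr;     /* number of entries */
             __u64 ips[];  /* instruction pointers */
     };

   A typical workflow would be along the lines of "perf record -g"
   followed by "perf data convert --to-ctf=./ctf-data", then opening the
   result in a CTF-aware trace viewer.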

Improvements:

 - Use group read for event groups in 'perf stat', reducing overhead when
   groups are defined in the event specification, i.e. when {} is used to
   enclose a list of events that are to be read at the same time, e.g.:
   "perf stat -e '{cycles,instructions}'" (Jiri Olsa)

Fixes:

 - Do not overwrite perf_sample->weight in 'perf annotate' when
   processing samples: use whatever came from the kernel when
   perf_event_attr.sample_type has PERF_SAMPLE_WEIGHT set, or just handle
   its default value, 0, when that is not set and "weight" is one of the
   chosen sort orders (Arnaldo Carvalho de Melo)
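
   A compilable sketch of the intended behaviour (illustrative helper,
   not the patch itself, which simply stops clobbering the value):

     #include <linux/perf_event.h>
     #include <linux/types.h>

     /* Keep the kernel-provided weight when it was actually sampled,
      * otherwise stick to the default of 0 instead of forcing it to 1. */
     static __u64 effective_weight(__u64 sample_type, __u64 sampled_weight)
     {
             return (sample_type & PERF_SAMPLE_WEIGHT) ? sampled_weight : 0;
     }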

 - 'perf annotate --show-total-period' fixes:
    - TUI should show period, not nr_samples
    - Set appropriate column width for period/percent
     - Fix the column header to show "Period" when that is what is
       being asked for
   (Taeung Song, Arnaldo Carvalho de Melo)

 - Use default sort if evlist is empty, fixing pipe mode (David Carrillo-Cisneros)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit c3a3800fe4 (Ingo Molnar, 2017-07-30 11:15:37 +02:00)
13 changed files with 334 additions and 32 deletions


@ -177,8 +177,6 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
*/
process_branch_stack(sample->branch_stack, al, sample);
sample->weight = 1;
he = hists__add_entry(hists, al, NULL, NULL, NULL, sample, true);
if (he == NULL)
return -ENOMEM;


@ -69,7 +69,7 @@ static int cmd_data_convert(int argc, const char **argv)
};
#ifndef HAVE_LIBBABELTRACE_SUPPORT
pr_err("No conversion support compiled in.\n");
pr_err("No conversion support compiled in. perf should be compiled with environment variables LIBBABELTRACE=1 and LIBBABELTRACE_DIR=/path/to/libbabeltrace/\n");
return -1;
#endif


@ -213,10 +213,20 @@ static void perf_stat__reset_stats(void)
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
struct perf_event_attr *attr = &evsel->attr;
struct perf_evsel *leader = evsel->leader;
if (stat_config.scale)
if (stat_config.scale) {
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
}
/*
* The event is part of non trivial group, let's enable
* the group read (for leader) and ID retrieval for all
* members.
*/
if (leader->nr_members > 1)
attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
attr->inherit = !no_inherit;
@ -333,13 +343,21 @@ static int read_counter(struct perf_evsel *counter)
struct perf_counts_values *count;
count = perf_counts(counter->counts, cpu, thread);
if (perf_evsel__read(counter, cpu, thread, count)) {
/*
* The leader's group read loads data into its group members
* (via perf_evsel__read_counter) and sets their count->loaded.
*/
if (!count->loaded &&
perf_evsel__read_counter(counter, cpu, thread)) {
counter->counts->scaled = -1;
perf_counts(counter->counts, cpu, thread)->ena = 0;
perf_counts(counter->counts, cpu, thread)->run = 0;
return -1;
}
count->loaded = false;
if (STAT_RECORD) {
if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
pr_err("failed to write stat event\n");
@ -559,6 +577,11 @@ static int store_counter_ids(struct perf_evsel *counter)
return __store_counter_ids(counter, cpus, threads);
}
static bool perf_evsel__should_store_id(struct perf_evsel *counter)
{
return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
}
static int __run_perf_stat(int argc, const char **argv)
{
int interval = stat_config.interval;
@ -631,7 +654,8 @@ try_again:
if (l > unit_width)
unit_width = l;
if (STAT_RECORD && store_counter_ids(counter))
if (perf_evsel__should_store_id(counter) &&
store_counter_ids(counter))
return -1;
}


@ -17,8 +17,8 @@
#include <sys/ttydefaults.h>
struct disasm_line_samples {
double percent;
u64 nr;
double percent;
struct sym_hist_entry he;
};
#define IPC_WIDTH 6
@ -110,11 +110,12 @@ static int annotate_browser__set_jumps_percent_color(struct annotate_browser *br
static int annotate_browser__pcnt_width(struct annotate_browser *ab)
{
int w = 7 * ab->nr_events;
return (annotate_browser__opts.show_total_period ? 12 : 7) * ab->nr_events;
}
if (ab->have_cycles)
w += IPC_WIDTH + CYCLES_WIDTH;
return w;
static int annotate_browser__cycles_width(struct annotate_browser *ab)
{
return ab->have_cycles ? IPC_WIDTH + CYCLES_WIDTH : 0;
}
static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
@ -127,7 +128,8 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
(!current_entry || (browser->use_navkeypressed &&
!browser->navkeypressed)));
int width = browser->width, printed;
int i, pcnt_width = annotate_browser__pcnt_width(ab);
int i, pcnt_width = annotate_browser__pcnt_width(ab),
cycles_width = annotate_browser__cycles_width(ab);
double percent_max = 0.0;
char bf[256];
bool show_title = false;
@ -151,8 +153,8 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
bdl->samples[i].percent,
current_entry);
if (annotate_browser__opts.show_total_period) {
ui_browser__printf(browser, "%6" PRIu64 " ",
bdl->samples[i].nr);
ui_browser__printf(browser, "%11" PRIu64 " ",
bdl->samples[i].he.period);
} else {
ui_browser__printf(browser, "%6.2f ",
bdl->samples[i].percent);
@ -162,9 +164,11 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
ui_browser__set_percent_color(browser, 0, current_entry);
if (!show_title)
ui_browser__write_nstring(browser, " ", 7 * ab->nr_events);
else
ui_browser__printf(browser, "%*s", 7, "Percent");
ui_browser__write_nstring(browser, " ", pcnt_width);
else {
ui_browser__printf(browser, "%*s", pcnt_width,
annotate_browser__opts.show_total_period ? "Period" : "Percent");
}
}
if (ab->have_cycles) {
if (dl->ipc)
@ -190,7 +194,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
width += 1;
if (!*dl->line)
ui_browser__write_nstring(browser, " ", width - pcnt_width);
ui_browser__write_nstring(browser, " ", width - pcnt_width - cycles_width);
else if (dl->offset == -1) {
if (dl->line_nr && annotate_browser__opts.show_linenr)
printed = scnprintf(bf, sizeof(bf), "%-*d ",
@ -199,7 +203,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
printed = scnprintf(bf, sizeof(bf), "%*s ",
ab->addr_width, " ");
ui_browser__write_nstring(browser, bf, printed);
ui_browser__write_nstring(browser, dl->line, width - printed - pcnt_width + 1);
ui_browser__write_nstring(browser, dl->line, width - printed - pcnt_width - cycles_width + 1);
} else {
u64 addr = dl->offset;
int color = -1;
@ -256,7 +260,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
}
disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
ui_browser__write_nstring(browser, bf, width - pcnt_width - 3 - printed);
ui_browser__write_nstring(browser, bf, width - pcnt_width - cycles_width - 3 - printed);
}
if (current_entry)
@ -457,7 +461,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
pos->offset,
next ? next->offset : len,
&path, &sample);
bpos->samples[i].nr = sample.nr_samples;
bpos->samples[i].he = sample;
if (max_percent < bpos->samples[i].percent)
max_percent = bpos->samples[i].percent;


@ -963,8 +963,9 @@ double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
u64 period = 0;
while (offset < end) {
hits += h->addr[offset++].nr_samples;
period += h->addr[offset++].period;
hits += h->addr[offset].nr_samples;
period += h->addr[offset].period;
++offset;
}
if (h->nr_samples) {
@ -1142,7 +1143,7 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
color = get_percent_color(percent);
if (symbol_conf.show_total_period)
color_fprintf(stdout, color, " %7" PRIu64,
color_fprintf(stdout, color, " %11" PRIu64,
sample.period);
else
color_fprintf(stdout, color, " %7.2f", percent);
@ -1165,7 +1166,7 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
} else if (max_lines && printed >= max_lines)
return 1;
else {
int width = 8;
int width = symbol_conf.show_total_period ? 12 : 8;
if (queue)
return -1;
@ -1806,7 +1807,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
int printed = 2, queue_len = 0;
int more = 0;
u64 len;
int width = 8;
int width = symbol_conf.show_total_period ? 12 : 8;
int graph_dotted_len;
filename = strdup(dso->long_name);


@ -12,6 +12,7 @@ struct perf_counts_values {
};
u64 values[3];
};
bool loaded;
};
struct perf_counts {


@ -76,6 +76,8 @@ struct ctf_writer {
struct bt_ctf_event_class *comm_class;
struct bt_ctf_event_class *exit_class;
struct bt_ctf_event_class *fork_class;
struct bt_ctf_event_class *mmap_class;
struct bt_ctf_event_class *mmap2_class;
};
struct convert {
@ -506,6 +508,81 @@ put_len_type:
return ret;
}
static int
add_callchain_output_values(struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
struct ip_callchain *callchain)
{
struct bt_ctf_field_type *len_type, *seq_type;
struct bt_ctf_field *len_field, *seq_field;
unsigned int nr_elements = callchain->nr;
unsigned int i;
int ret;
len_type = bt_ctf_event_class_get_field_by_name(
event_class, "perf_callchain_size");
len_field = bt_ctf_field_create(len_type);
if (!len_field) {
pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
ret = -1;
goto put_len_type;
}
ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
if (ret) {
pr_err("failed to set field value for perf_callchain_size\n");
goto put_len_field;
}
ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
if (ret) {
pr_err("failed to set payload to perf_callchain_size\n");
goto put_len_field;
}
seq_type = bt_ctf_event_class_get_field_by_name(
event_class, "perf_callchain");
seq_field = bt_ctf_field_create(seq_type);
if (!seq_field) {
pr_err("failed to create 'perf_callchain' for callchain output event\n");
ret = -1;
goto put_seq_type;
}
ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
if (ret) {
pr_err("failed to set length of 'perf_callchain'\n");
goto put_seq_field;
}
for (i = 0; i < nr_elements; i++) {
struct bt_ctf_field *elem_field =
bt_ctf_field_sequence_get_field(seq_field, i);
ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
((u64 *)(callchain->ips))[i]);
bt_ctf_field_put(elem_field);
if (ret) {
pr_err("failed to set callchain[%d]\n", i);
goto put_seq_field;
}
}
ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
if (ret)
pr_err("failed to set payload for raw_data\n");
put_seq_field:
bt_ctf_field_put(seq_field);
put_seq_type:
bt_ctf_field_type_put(seq_type);
put_len_field:
bt_ctf_field_put(len_field);
put_len_type:
bt_ctf_field_type_put(len_type);
return ret;
}
static int add_generic_values(struct ctf_writer *cw,
struct bt_ctf_event *event,
struct perf_evsel *evsel,
@ -519,7 +596,6 @@ static int add_generic_values(struct ctf_writer *cw,
* PERF_SAMPLE_TIME - not needed as we have it in
* ctf event header
* PERF_SAMPLE_READ - TODO
* PERF_SAMPLE_CALLCHAIN - TODO
* PERF_SAMPLE_RAW - tracepoint fields are handled separately
* PERF_SAMPLE_BRANCH_STACK - TODO
* PERF_SAMPLE_REGS_USER - TODO
@ -720,6 +796,7 @@ static int process_sample_event(struct perf_tool *tool,
struct bt_ctf_event_class *event_class;
struct bt_ctf_event *event;
int ret;
unsigned long type = evsel->attr.sample_type;
if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
return 0;
@ -751,6 +828,13 @@ static int process_sample_event(struct perf_tool *tool,
return -1;
}
if (type & PERF_SAMPLE_CALLCHAIN) {
ret = add_callchain_output_values(event_class,
event, sample->callchain);
if (ret)
return -1;
}
if (perf_evsel__is_bpf_output(evsel)) {
ret = add_bpf_output_values(event_class, event, sample);
if (ret)
@ -833,6 +917,18 @@ __FUNC_PROCESS_NON_SAMPLE(exit,
__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
__FUNC_PROCESS_NON_SAMPLE(mmap,
__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
__NON_SAMPLE_SET_FIELD(mmap, string, filename);
)
__FUNC_PROCESS_NON_SAMPLE(mmap2,
__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE
@ -1043,6 +1139,14 @@ static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
if (type & PERF_SAMPLE_TRANSACTION)
ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
if (type & PERF_SAMPLE_CALLCHAIN) {
ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
ADD_FIELD(event_class,
bt_ctf_field_type_sequence_create(
cw->data.u64_hex, "perf_callchain_size"),
"perf_callchain");
}
#undef ADD_FIELD
return 0;
}
@ -1164,6 +1268,19 @@ __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
__NON_SAMPLE_ADD_FIELD(u64, time);
)
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
__NON_SAMPLE_ADD_FIELD(u32, pid);
__NON_SAMPLE_ADD_FIELD(u32, tid);
__NON_SAMPLE_ADD_FIELD(u64_hex, start);
__NON_SAMPLE_ADD_FIELD(string, filename);
)
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
__NON_SAMPLE_ADD_FIELD(u32, pid);
__NON_SAMPLE_ADD_FIELD(u32, tid);
__NON_SAMPLE_ADD_FIELD(u64_hex, start);
__NON_SAMPLE_ADD_FIELD(string, filename);
)
#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
@ -1179,6 +1296,12 @@ static int setup_non_sample_events(struct ctf_writer *cw,
if (ret)
return ret;
ret = add_fork_event(cw);
if (ret)
return ret;
ret = add_mmap_event(cw);
if (ret)
return ret;
ret = add_mmap2_event(cw);
if (ret)
return ret;
return 0;
@ -1482,6 +1605,8 @@ int bt_convert__perf2ctf(const char *input, const char *path,
c.tool.comm = process_comm_event;
c.tool.exit = process_exit_event;
c.tool.fork = process_fork_event;
c.tool.mmap = process_mmap_event;
c.tool.mmap2 = process_mmap2_event;
}
err = perf_config(convert__config, &c);


@ -265,6 +265,11 @@ bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
struct list_head *list);
static inline bool perf_evlist__empty(struct perf_evlist *evlist)
{
return list_empty(&evlist->entries);
}
static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
return list_entry(evlist->entries.next, struct perf_evsel, node);


@ -49,6 +49,7 @@ static struct {
bool clockid_wrong;
bool lbr_flags;
bool write_backward;
bool group_read;
} perf_missing_features;
static clockid_t clockid;
@ -1261,20 +1262,148 @@ void perf_counts_values__scale(struct perf_counts_values *count,
*pscaled = scaled;
}
static int perf_evsel__read_size(struct perf_evsel *evsel)
{
u64 read_format = evsel->attr.read_format;
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
if (read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
if (read_format & PERF_FORMAT_GROUP) {
nr = evsel->nr_members;
size += sizeof(u64);
}
size += entry * nr;
return size;
}
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
struct perf_counts_values *count)
{
size_t size = perf_evsel__read_size(evsel);
memset(count, 0, sizeof(*count));
if (FD(evsel, cpu, thread) < 0)
return -EINVAL;
if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) <= 0)
if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
return -errno;
return 0;
}
static int
perf_evsel__read_one(struct perf_evsel *evsel, int cpu, int thread)
{
struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);
return perf_evsel__read(evsel, cpu, thread, count);
}
static void
perf_evsel__set_count(struct perf_evsel *counter, int cpu, int thread,
u64 val, u64 ena, u64 run)
{
struct perf_counts_values *count;
count = perf_counts(counter->counts, cpu, thread);
count->val = val;
count->ena = ena;
count->run = run;
count->loaded = true;
}
static int
perf_evsel__process_group_data(struct perf_evsel *leader,
int cpu, int thread, u64 *data)
{
u64 read_format = leader->attr.read_format;
struct sample_read_value *v;
u64 nr, ena = 0, run = 0, i;
nr = *data++;
if (nr != (u64) leader->nr_members)
return -EINVAL;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
ena = *data++;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
run = *data++;
v = (struct sample_read_value *) data;
perf_evsel__set_count(leader, cpu, thread,
v[0].value, ena, run);
for (i = 1; i < nr; i++) {
struct perf_evsel *counter;
counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
if (!counter)
return -EINVAL;
perf_evsel__set_count(counter, cpu, thread,
v[i].value, ena, run);
}
return 0;
}
static int
perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
{
struct perf_stat_evsel *ps = leader->priv;
u64 read_format = leader->attr.read_format;
int size = perf_evsel__read_size(leader);
u64 *data = ps->group_data;
if (!(read_format & PERF_FORMAT_ID))
return -EINVAL;
if (!perf_evsel__is_group_leader(leader))
return -EINVAL;
if (!data) {
data = zalloc(size);
if (!data)
return -ENOMEM;
ps->group_data = data;
}
if (FD(leader, cpu, thread) < 0)
return -EINVAL;
if (readn(FD(leader, cpu, thread), data, size) <= 0)
return -errno;
return perf_evsel__process_group_data(leader, cpu, thread, data);
}
int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread)
{
u64 read_format = evsel->attr.read_format;
if (read_format & PERF_FORMAT_GROUP)
return perf_evsel__read_group(evsel, cpu, thread);
else
return perf_evsel__read_one(evsel, cpu, thread);
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
int cpu, int thread, bool scale)
{
@ -1550,6 +1679,8 @@ fallback_missing_features:
if (perf_missing_features.lbr_flags)
evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES);
if (perf_missing_features.group_read && evsel->attr.inherit)
evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
@ -1705,6 +1836,12 @@ try_fallback:
perf_missing_features.lbr_flags = true;
pr_debug2("switching off branch sample type no (cycles/flags)\n");
goto fallback_missing_features;
} else if (!perf_missing_features.group_read &&
evsel->attr.inherit &&
(evsel->attr.read_format & PERF_FORMAT_GROUP)) {
perf_missing_features.group_read = true;
pr_debug2("switching off group read\n");
goto fallback_missing_features;
}
out_close:
do {


@ -299,6 +299,8 @@ static inline bool perf_evsel__match2(struct perf_evsel *e1,
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
struct perf_counts_values *count);
int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread);
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
int cpu, int thread, bool scale);


@ -2563,7 +2563,7 @@ static const char *get_default_sort_order(struct perf_evlist *evlist)
BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
if (evlist == NULL)
if (evlist == NULL || perf_evlist__empty(evlist))
goto out_no_evlist;
evlist__for_each_entry(evlist, evsel) {


@ -128,6 +128,10 @@ static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
struct perf_stat_evsel *ps = evsel->priv;
if (ps)
free(ps->group_data);
zfree(&evsel->priv);
}


@ -28,8 +28,9 @@ enum perf_stat_evsel_id {
};
struct perf_stat_evsel {
struct stats res_stats[3];
enum perf_stat_evsel_id id;
struct stats res_stats[3];
enum perf_stat_evsel_id id;
u64 *group_data;
};
enum aggr_mode {