perf record: Reuse target::initial_delay

This simply replaces record_opts::initial_delay with
target::initial_delay. Nothing else is changed.
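For orientation, a minimal sketch of the relationship after this change; the
field lists below are abbreviated and illustrative, only initial_delay and the
embedded target member are taken from the actual code:

/* Sketch only: the real definitions in tools/perf/util/target.h and
 * tools/perf/util/record.h carry many more members. */
struct target {
        const char      *pid;
        const char      *cpu_list;
        bool            system_wide;
        int             initial_delay;  /* ms; added to struct target by the parent patch */
};

struct record_opts {
        struct target   target;
        /* int initial_delay;  -- duplicate member dropped by this patch */
        bool            sample_transaction;
};

/* Call sites now read the delay through the embedded target, e.g.
 *      usleep(opts->target.initial_delay * USEC_PER_MSEC);
 */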

Signed-off-by: Changbin Du <changbin.du@huawei.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Hui Wang <hw.huiwang@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230302031146.2801588-3-changbin.du@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit cb4b9e6813 (parent 07d85ba9d0)
Author: Changbin Du, 2023-03-02 11:11:45 +08:00
Committed by: Arnaldo Carvalho de Melo
5 changed files with 14 additions and 15 deletions

tools/perf/builtin-record.c

@@ -1292,7 +1292,7 @@ static int record__open(struct record *rec)
 	 * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
 	 * of waiting or event synthesis.
 	 */
-	if (opts->initial_delay || target__has_cpu(&opts->target) ||
+	if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
 	    perf_pmu__has_hybrid()) {
 		pos = evlist__get_tracking_event(evlist);
 		if (!evsel__is_dummy_event(pos)) {
@@ -1307,7 +1307,7 @@ static int record__open(struct record *rec)
 	 * Enable the dummy event when the process is forked for
 	 * initial_delay, immediately for system wide.
 	 */
-	if (opts->initial_delay && !pos->immediate &&
+	if (opts->target.initial_delay && !pos->immediate &&
 	    !target__has_cpu(&opts->target))
 		pos->core.attr.enable_on_exec = 1;
 	else
@@ -2522,7 +2522,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 	 * (apart from group members) have enable_on_exec=1 set,
 	 * so don't spoil it by prematurely enabling them.
 	 */
-	if (!target__none(&opts->target) && !opts->initial_delay)
+	if (!target__none(&opts->target) && !opts->target.initial_delay)
 		evlist__enable(rec->evlist);
 
 	/*
@@ -2574,10 +2574,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 		evlist__start_workload(rec->evlist);
 	}
 
-	if (opts->initial_delay) {
+	if (opts->target.initial_delay) {
 		pr_info(EVLIST_DISABLED_MSG);
-		if (opts->initial_delay > 0) {
-			usleep(opts->initial_delay * USEC_PER_MSEC);
+		if (opts->target.initial_delay > 0) {
+			usleep(opts->target.initial_delay * USEC_PER_MSEC);
 			evlist__enable(rec->evlist);
 			pr_info(EVLIST_ENABLED_MSG);
 		}

tools/perf/builtin-trace.c

@@ -3993,14 +3993,14 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 	if (err < 0)
 		goto out_error_mmap;
 
-	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
+	if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
 		evlist__enable(evlist);
 
 	if (forks)
 		evlist__start_workload(evlist);
 
-	if (trace->opts.initial_delay) {
-		usleep(trace->opts.initial_delay * 1000);
+	if (trace->opts.target.initial_delay) {
+		usleep(trace->opts.target.initial_delay * 1000);
 		evlist__enable(evlist);
 	}
 
@@ -4788,7 +4788,7 @@ int cmd_trace(int argc, const char **argv)
 		   "per thread proc mmap processing timeout in ms"),
 	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
 		     trace__parse_cgroups),
-	OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
+	OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
		     "ms to wait before starting measurement after program "
		     "start"),
 	OPTS_EVSWITCH(&trace.evswitch),

tools/perf/util/evlist.c

@@ -2262,8 +2262,8 @@ int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *o
 	if (unset)
 		return 0;
 
-	opts->initial_delay = str_to_delay(str);
-	if (opts->initial_delay)
+	opts->target.initial_delay = str_to_delay(str);
+	if (opts->target.initial_delay)
 		return 0;
 
 	ret = parse_event_enable_times(str, NULL);
@@ -2306,7 +2306,7 @@ int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *o
 	eet->evlist = evlist;
 	evlist->eet = eet;
-	opts->initial_delay = eet->times[0].start;
+	opts->target.initial_delay = eet->times[0].start;
 
 	return 0;

tools/perf/util/evsel.c

@@ -1334,7 +1334,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
 	 * group leaders for traced executed by perf.
 	 */
 	if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
-	    !opts->initial_delay)
+	    !opts->target.initial_delay)
 		attr->enable_on_exec = 1;
 
 	if (evsel->immediate) {

tools/perf/util/record.h

@@ -65,7 +65,6 @@ struct record_opts {
 	const char	*auxtrace_snapshot_opts;
 	const char	*auxtrace_sample_opts;
 	bool		sample_transaction;
-	int		initial_delay;
 	bool		use_clockid;
 	clockid_t	clockid;
 	u64		clockid_res_ns;