perf evlist: Introduce poll method for common code idiom
Since we access two evlist members in all these poll calls, provide a helper. This will also help make the patch introducing the pollfd class clearer, as the evlist-specific uses will be hidden away in perf_evlist__poll().

Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jean Pihet <jean.pihet@linaro.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-jr9d4aop4lvy9453qahbcgp0@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
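For quick reference, the helper introduced further down simply wraps the two evlist members that every caller was reaching into directly. A minimal before/after sketch of a call site (the timeout_ms variable is illustrative, not taken from the patch):

	/* Helper added by this patch: */
	int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
	{
		return poll(evlist->pollfd, evlist->nr_fds, timeout);
	}

	/* Call sites stop reaching into evlist->pollfd / evlist->nr_fds... */
	err = poll(evlist->pollfd, evlist->nr_fds, timeout_ms);

	/* ...and go through the helper instead: */
	err = perf_evlist__poll(evlist, timeout_ms);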
@@ -459,7 +459,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 		if (hits == rec->samples) {
 			if (done)
 				break;
-			err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
+			err = perf_evlist__poll(rec->evlist, -1);
 			/*
 			 * Propagate error, only if there's any. Ignore positive
 			 * number of returned events and interrupt error.
@@ -964,7 +964,7 @@ static int __cmd_top(struct perf_top *top)
 	perf_evlist__enable(top->evlist);
 
 	/* Wait for a minimal set of events before starting the snapshot */
-	poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
+	perf_evlist__poll(top->evlist, 100);
 
 	perf_top__mmap_read(top);
 
@@ -991,7 +991,7 @@ static int __cmd_top(struct perf_top *top)
 		perf_top__mmap_read(top);
 
 		if (hits == top->samples)
-			ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
+			ret = perf_evlist__poll(top->evlist, 100);
 	}
 
 	ret = 0;
@@ -2171,7 +2171,7 @@ next_event:
 	if (trace->nr_events == before) {
 		int timeout = done ? 100 : -1;
 
-		if (poll(evlist->pollfd, evlist->nr_fds, timeout) > 0)
+		if (perf_evlist__poll(evlist, timeout) > 0)
 			goto again;
 	} else {
 		goto again;
@@ -105,7 +105,7 @@ int test__syscall_open_tp_fields(void)
 		}
 
 		if (nr_events == before)
-			poll(evlist->pollfd, evlist->nr_fds, 10);
+			perf_evlist__poll(evlist, 10);
 
 		if (++nr_polls > 5) {
 			pr_debug("%s: no events!\n", __func__);
@@ -268,7 +268,7 @@ int test__PERF_RECORD(void)
 		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
 		 */
 		if (total_events == before && false)
-			poll(evlist->pollfd, evlist->nr_fds, -1);
+			perf_evlist__poll(evlist, -1);
 
 		sleep(1);
 		if (++wakeups > 5) {
@@ -105,7 +105,7 @@ retry:
 	}
 
 	if (!exited || !nr_exit) {
-		poll(evlist->pollfd, evlist->nr_fds, -1);
+		perf_evlist__poll(evlist, -1);
 		goto retry;
 	}
 
@@ -475,6 +475,11 @@ int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
 	return nr_fds;
 }
 
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
+{
+	return poll(evlist->pollfd, evlist->nr_fds, timeout);
+}
+
 static void perf_evlist__id_hash(struct perf_evlist *evlist,
 				 struct perf_evsel *evsel,
 				 int cpu, int thread, u64 id)
@@ -87,6 +87,8 @@ int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);
 
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
+
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
 
 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
@@ -736,7 +736,7 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
 		return NULL;
 
-	n = poll(evlist->pollfd, evlist->nr_fds, timeout);
+	n = perf_evlist__poll(evlist, timeout);
 	if (n < 0) {
 		PyErr_SetFromErrno(PyExc_OSError);
 		return NULL;