f9b10c82fa
Failures to open the tracepoint cause this test to fail, however, typically such failures are permission related. Lower the failure to just skipping the test in those cases and add a skip reason. Committer testing: Before: $ perf test "openat syscall" 2: Detect openat syscall event : FAILED! 3: Detect openat syscall event on all cpus : FAILED! $ After: $ perf test "openat syscall" 2: Detect openat syscall event : Skip (permissions) 3: Detect openat syscall event on all cpus : Skip (permissions) $ Signed-off-by: Ian Rogers <irogers@google.com> Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Carsten Haitzler <carsten.haitzler@arm.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jiri Olsa <jolsa@kernel.org> Cc: John Garry <john.garry@huawei.com> Cc: Marco Elver <elver@google.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Michael Petlan <mpetlan@redhat.com> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Riccardo Mancini <rickyman7@gmail.com> Cc: Sohaib Mohamed <sohaib.amhmd@gmail.com> Cc: Stephane Eranian <eranian@google.com> Link: https://lore.kernel.org/r/20220518042027.836799-4-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
139 lines
3.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#include <errno.h>
|
|
#include <inttypes.h>
|
|
/* For the CPU_* macros */
|
|
#include <pthread.h>
|
|
|
|
#include <sys/types.h>
|
|
#include <sys/stat.h>
|
|
#include <fcntl.h>
|
|
#include <api/fs/fs.h>
|
|
#include <linux/err.h>
|
|
#include <linux/string.h>
|
|
#include <api/fs/tracing_path.h>
|
|
#include "evsel.h"
|
|
#include "tests.h"
|
|
#include "thread_map.h"
|
|
#include <perf/cpumap.h>
|
|
#include "debug.h"
|
|
#include "stat.h"
|
|
#include "util/counts.h"
|
|
|
|
/*
 * Verify that the "syscalls:sys_enter_openat" tracepoint counts correctly
 * on every online CPU: open the event on all CPUs, pin this thread to each
 * CPU in turn, issue a per-CPU-distinct number of openat() calls there, and
 * finally check each CPU's count matches what was issued on it.
 *
 * Returns TEST_OK on success, TEST_FAIL on a count mismatch or runtime
 * error, and TEST_SKIP when the tracepoint or the event cannot be opened —
 * typically a permissions problem (e.g. perf_event_paranoid).
 */
static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused,
						  int subtest __maybe_unused)
{
	int err = TEST_FAIL, fd, idx;
	struct perf_cpu cpu;
	struct perf_cpu_map *cpus;
	struct evsel *evsel;
	/* Base call count; the CPU at map index idx makes nr_openat_calls + idx calls. */
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	/* Thread map containing only the current process. */
	struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	/* NULL argument: build the map of all online CPUs. */
	cpus = perf_cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("perf_cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		/* Opening the tracepoint usually fails for permission reasons: skip, don't fail. */
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		err = TEST_SKIP;
		goto out_cpu_map_delete;
	}

	if (evsel__open(evsel, cpus, threads) < 0) {
		/* Likewise, failure to open the counter is most often permissions: skip. */
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		err = TEST_SKIP;
		goto out_evsel_delete;
	}

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		unsigned int ncalls = nr_openat_calls + idx;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpu.cpu >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpu.cpu);
			continue;
		}

		/* Pin to this CPU so all the openat() calls below are counted there. */
		CPU_SET(cpu.cpu, &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpu.cpu,
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			/*
			 * Each call fires the tracepoint; the open result itself is
			 * not checked (close(-1) on failure is harmless here).
			 */
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		/* Clear the bit so the mask holds exactly one CPU on the next iteration. */
		CPU_CLR(cpu.cpu, &cpu_set);
	}

	/*
	 * NOTE(review): takes an extra reference on the cpu map for the evsel,
	 * presumably so evsel__read_on_cpu() below can resolve per-CPU counts
	 * against the full map — TODO confirm against evsel__read_on_cpu().
	 */
	evsel->core.cpus = perf_cpu_map__get(cpus);

	/* Assume success from here; the loop below downgrades to TEST_FAIL on mismatch. */
	err = TEST_OK;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		unsigned int expected;

		/* CPUs skipped above made no calls, so don't check them either. */
		if (cpu.cpu >= CPU_SETSIZE)
			continue;

		if (evsel__read_on_cpu(evsel, idx, 0) < 0) {
			pr_debug("evsel__read_on_cpu\n");
			err = TEST_FAIL;
			break;
		}

		/* Per-CPU-distinct count: a mismatch pinpoints the offending CPU. */
		expected = nr_openat_calls + idx;
		if (perf_counts(evsel->counts, idx, 0)->val != expected) {
			pr_debug("evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpu.cpu, perf_counts(evsel->counts, idx, 0)->val);
			err = TEST_FAIL;
		}
	}

	evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(&evsel->core);
out_evsel_delete:
	evsel__delete(evsel);
out_cpu_map_delete:
	perf_cpu_map__put(cpus);
out_thread_map_delete:
	perf_thread_map__put(threads);
	return err;
}
|
|
|
|
|
|
/*
 * Test case table: a single entry using TEST_CASE_REASON() so that a
 * TEST_SKIP result is reported with the "permissions" skip reason,
 * terminated by a NULL-named sentinel entry.
 */
static struct test_case tests__openat_syscall_event_on_all_cpus[] = {
	TEST_CASE_REASON("Detect openat syscall event on all cpus",
			 openat_syscall_event_on_all_cpus,
			 "permissions"),
	{ .name = NULL, }
};
|
|
|
|
/* Suite descriptor referenced by the perf test harness' suite list. */
struct test_suite suite__openat_syscall_event_on_all_cpus = {
	.desc = "Detect openat syscall event on all cpus",
	.test_cases = tests__openat_syscall_event_on_all_cpus,
};
|