d6a735ef32
Some helper functions will be used for cgroup counting too. Move them
to a header file for sharing.

Committer notes:

Fix the build on older systems with:

  -       struct bpf_map_info map_info = {0};
  +       struct bpf_map_info map_info = { .id = 0, };

This wasn't breaking the build in such systems as bpf_counter.c isn't
built due to:

  tools/perf/util/Build:
    perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o

The bpf_counter.h file, on the other hand, is included from places that
are built everywhere.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lore.kernel.org/lkml/20210625071826.608504-4-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
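To make the motivation concrete ("Some helper functions will be used for
cgroup counting too"), a caller sharing these helpers might look roughly
like the sketch below. This is only an illustration: the function name
cgroup_counter_setup() and its arguments are hypothetical, and only
set_max_rlimit(), bpf_map_get_id() and bperf_trigger_reading() come from
the header added by this commit.

  #include <errno.h>

  #include "bpf_counter.h"

  /* Hypothetical caller; only the bpf_counter.h helpers are real. */
  static int cgroup_counter_setup(int map_fd, int leader_prog_fd, int cpu)
  {
          __u32 map_id;
          int err;

          /* Raise RLIMIT_MEMLOCK so BPF maps/programs can be created. */
          set_max_rlimit();

          /* Resolve the map id from its fd for later matching. */
          map_id = bpf_map_get_id(map_fd);
          if (!map_id)
                  return -EINVAL;

          /* Kick the leader program on one CPU to update its counters. */
          err = bperf_trigger_reading(leader_prog_fd, cpu);
          if (err)
                  return err;

          return 0;
  }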
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_BPF_COUNTER_H
#define __PERF_BPF_COUNTER_H 1

#include <linux/list.h>
#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

struct evsel;
struct target;
struct bpf_counter;

typedef int (*bpf_counter_evsel_op)(struct evsel *evsel);
typedef int (*bpf_counter_evsel_target_op)(struct evsel *evsel,
                                           struct target *target);
typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
                                               int cpu,
                                               int fd);

struct bpf_counter_ops {
        bpf_counter_evsel_target_op load;
        bpf_counter_evsel_op enable;
        bpf_counter_evsel_op disable;
        bpf_counter_evsel_op read;
        bpf_counter_evsel_op destroy;
        bpf_counter_evsel_install_pe_op install_pe;
};

struct bpf_counter {
        void *skel;
        struct list_head list;
};

#ifdef HAVE_BPF_SKEL

int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);

#else /* HAVE_BPF_SKEL */

#include <linux/err.h>

static inline int bpf_counter__load(struct evsel *evsel __maybe_unused,
                                    struct target *target __maybe_unused)
{
        return 0;
}

static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
{
        return 0;
}

static inline int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
        return 0;
}

static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
{
        return -EAGAIN;
}

static inline void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
                                          int cpu __maybe_unused,
                                          int fd __maybe_unused)
{
        return 0;
}

#endif /* HAVE_BPF_SKEL */

static inline void set_max_rlimit(void)
{
        struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

        setrlimit(RLIMIT_MEMLOCK, &rinf);
}

static inline __u32 bpf_link_get_id(int fd)
{
        struct bpf_link_info link_info = { .id = 0, };
        __u32 link_info_len = sizeof(link_info);

        bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
        return link_info.id;
}

static inline __u32 bpf_link_get_prog_id(int fd)
{
        struct bpf_link_info link_info = { .id = 0, };
        __u32 link_info_len = sizeof(link_info);

        bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
        return link_info.prog_id;
}

static inline __u32 bpf_map_get_id(int fd)
{
        struct bpf_map_info map_info = { .id = 0, };
        __u32 map_info_len = sizeof(map_info);

        bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
        return map_info.id;
}

/* trigger the leader program on a cpu */
static inline int bperf_trigger_reading(int prog_fd, int cpu)
{
        DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
                            .ctx_in = NULL,
                            .ctx_size_in = 0,
                            .flags = BPF_F_TEST_RUN_ON_CPU,
                            .cpu = cpu,
                            .retval = 0,
        );

        return bpf_prog_test_run_opts(prog_fd, &opts);
}

#endif /* __PERF_BPF_COUNTER_H */
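For reference, struct bpf_counter_ops above is the dispatch table that a
skeleton-based counter backend fills in, and the bpf_counter__*() functions
declared under HAVE_BPF_SKEL forward to whichever ops table an evsel uses.
A backend provider would look roughly like the sketch below; the example_*
names are hypothetical, and only the typedefs and the struct layout come
from this header.

  /* Hypothetical backend implementation; names are illustrative only. */
  static int example_load(struct evsel *evsel, struct target *target)
  {
          /* open and load a BPF skeleton for this evsel */
          return 0;
  }

  static int example_enable(struct evsel *evsel)
  {
          /* attach/start the BPF program */
          return 0;
  }

  static int example_read(struct evsel *evsel)
  {
          /* copy counter values out of the BPF map into the evsel */
          return 0;
  }

  struct bpf_counter_ops example_counter_ops = {
          .load   = example_load,
          .enable = example_enable,
          .read   = example_read,
  };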