Merge branch 'bpf-fix-a-couple-of-test-failures-with-lto-kernel'
Yonghong Song says:

====================
bpf: Fix a couple of test failures with LTO kernel

With an LTO kernel built with clang, using one of the earlier kernel versions, I encountered two test failures: ksyms and kprobe_multi_bench_attach/kernel. With the latest bpf-next, only kprobe_multi_bench_attach/kernel fails, but it is possible that the ksyms selftest may fail again in the future.

Both test failures are due to static variable/function renaming caused by cross-file inlining. For the ksyms failure, the solution is to strip the .llvm.<hash> suffixes from symbols in /proc/kallsyms before comparing against the ksym in the bpf program. For the kprobe_multi_bench_attach/kernel failure, the solution is to either provide the names from /proc/kallsyms to the kernel or ignore those names that have a .llvm.<hash> suffix, since the kernel sym name comparison is against /proc/kallsyms.

Please see the individual patches for details.

Changelog:
v2 -> v3:
  - No need to check the config file; directly do strstr with '.llvm.'.
  - For kprobe_multi_bench with syms, instead of skipping the syms, consult /proc/kallsyms to find the corresponding names.
  - Add a test that populates addrs to the kernel for kprobe multi attach.
v1 -> v2:
  - Let libbpf handle .llvm.<hash> suffixes since they may impact bpf program ksyms.
====================

Link: https://lore.kernel.org/r/20240326041443.1197498-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
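As a rough illustration of the approach the series takes (this sketch is not taken from the patches; the helper and the example symbol strings are invented), the suffix-tolerant matching that both libbpf's kallsyms_cb() and the selftest's compare_name() rely on boils down to ignoring everything from the first ".llvm." onward before comparing names:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Compare two kallsyms-style names while ignoring any trailing
 * ".llvm.<hash>" suffix that LTO cross-file inlining may append
 * to static symbols. Hypothetical helper for illustration only.
 */
static bool sym_name_match(const char *name1, const char *name2)
{
	const char *res1 = strstr(name1, ".llvm.");
	const char *res2 = strstr(name2, ".llvm.");
	size_t len1 = res1 ? (size_t)(res1 - name1) : strlen(name1);
	size_t len2 = res2 ? (size_t)(res2 - name2) : strlen(name2);

	return len1 == len2 && strncmp(name1, name2, len1) == 0;
}

int main(void)
{
	/* An LTO-renamed entry in /proc/kallsyms should still match
	 * the plain name used by the BPF program or the test.
	 */
	printf("%d\n", sym_name_match("bpf_fentry_test1.llvm.1234", "bpf_fentry_test1")); /* 1 */
	printf("%d\n", sym_name_match("bpf_fentry_test1", "bpf_fentry_test2"));           /* 0 */
	return 0;
}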
commit e478cf26c5
tools/lib/bpf/libbpf.c
@@ -1970,6 +1970,20 @@ static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
	return NULL;
}

static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj,
							 const void *name, int len)
{
	const char *ext_name;
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		ext_name = obj->externs[i].name;
		if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
			      char value)
{
@@ -7986,7 +8000,10 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
	return 0;
}

int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
			     const char *sym_name, void *ctx);

static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
{
	char sym_type, sym_name[500];
	unsigned long long sym_addr;
@@ -8026,8 +8043,13 @@ static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
	struct bpf_object *obj = ctx;
	const struct btf_type *t;
	struct extern_desc *ext;
	char *res;

	ext = find_extern_by_name(obj, sym_name);
	res = strstr(sym_name, ".llvm.");
	if (sym_type == 'd' && res)
		ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name);
	else
		ext = find_extern_by_name(obj, sym_name);
	if (!ext || ext->type != EXT_KSYM)
		return 0;

tools/lib/bpf/libbpf_internal.h
@@ -518,11 +518,6 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind);

typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
			     const char *sym_name, void *ctx);

int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *arg);

/* handle direct returned errors */
static inline int libbpf_err(int ret)
{
tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -336,15 +336,80 @@ static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
	return strcmp((const char *) key1, (const char *) key2) == 0;
}

static bool is_invalid_entry(char *buf, bool kernel)
{
	if (kernel && strchr(buf, '['))
		return true;
	if (!kernel && !strchr(buf, '['))
		return true;
	return false;
}

static bool skip_entry(char *name)
{
	/*
	 * We attach to almost all kernel functions and some of them
	 * will cause 'suspicious RCU usage' when fprobe is attached
	 * to them. Filter out the current culprits - arch_cpu_idle
	 * default_idle and rcu_* functions.
	 */
	if (!strcmp(name, "arch_cpu_idle"))
		return true;
	if (!strcmp(name, "default_idle"))
		return true;
	if (!strncmp(name, "rcu_", 4))
		return true;
	if (!strcmp(name, "bpf_dispatcher_xdp_func"))
		return true;
	if (!strncmp(name, "__ftrace_invalid_address__",
		     sizeof("__ftrace_invalid_address__") - 1))
		return true;
	return false;
}

/* Do comparision by ignoring '.llvm.<hash>' suffixes. */
static int compare_name(const char *name1, const char *name2)
{
	const char *res1, *res2;
	int len1, len2;

	res1 = strstr(name1, ".llvm.");
	res2 = strstr(name2, ".llvm.");
	len1 = res1 ? res1 - name1 : strlen(name1);
	len2 = res2 ? res2 - name2 : strlen(name2);

	if (len1 == len2)
		return strncmp(name1, name2, len1);
	if (len1 < len2)
		return strncmp(name1, name2, len1) <= 0 ? -1 : 1;
	return strncmp(name1, name2, len2) >= 0 ? 1 : -1;
}

static int load_kallsyms_compare(const void *p1, const void *p2)
{
	return compare_name(((const struct ksym *)p1)->name, ((const struct ksym *)p2)->name);
}

static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
{
	return compare_name(p1, p2->name);
}

static int get_syms(char ***symsp, size_t *cntp, bool kernel)
{
	size_t cap = 0, cnt = 0, i;
	char *name = NULL, **syms = NULL;
	size_t cap = 0, cnt = 0;
	char *name = NULL, *ksym_name, **syms = NULL;
	struct hashmap *map;
	struct ksyms *ksyms;
	struct ksym *ks;
	char buf[256];
	FILE *f;
	int err = 0;

	ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_custom_local"))
		return -EINVAL;

	/*
	 * The available_filter_functions contains many duplicates,
	 * but other than that all symbols are usable in kprobe multi
@@ -368,33 +433,23 @@ static int get_syms(char ***symsp, size_t *cntp, bool kernel)
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (kernel && strchr(buf, '['))
			continue;
		if (!kernel && !strchr(buf, '['))
		if (is_invalid_entry(buf, kernel))
			continue;

		free(name);
		if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
			continue;
		/*
		 * We attach to almost all kernel functions and some of them
		 * will cause 'suspicious RCU usage' when fprobe is attached
		 * to them. Filter out the current culprits - arch_cpu_idle
		 * default_idle and rcu_* functions.
		 */
		if (!strcmp(name, "arch_cpu_idle"))
			continue;
		if (!strcmp(name, "default_idle"))
			continue;
		if (!strncmp(name, "rcu_", 4))
			continue;
		if (!strcmp(name, "bpf_dispatcher_xdp_func"))
			continue;
		if (!strncmp(name, "__ftrace_invalid_address__",
			     sizeof("__ftrace_invalid_address__") - 1))
		if (skip_entry(name))
			continue;

		err = hashmap__add(map, name, 0);
		ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
		if (!ks) {
			err = -EINVAL;
			goto error;
		}

		ksym_name = ks->name;
		err = hashmap__add(map, ksym_name, 0);
		if (err == -EEXIST) {
			err = 0;
			continue;
@@ -407,8 +462,7 @@ static int get_syms(char ***symsp, size_t *cntp, bool kernel)
		if (err)
			goto error;

		syms[cnt++] = name;
		name = NULL;
		syms[cnt++] = ksym_name;
	}

	*symsp = syms;
@@ -418,24 +472,107 @@ error:
	free(name);
	fclose(f);
	hashmap__free(map);
	if (err) {
		for (i = 0; i < cnt; i++)
			free(syms[i]);
	if (err)
		free(syms);
	}
	return err;
}

static int get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
{
	unsigned long *addr, *addrs, *tmp_addrs;
	int err = 0, max_cnt, inc_cnt;
	char *name = NULL;
	size_t cnt = 0;
	char buf[256];
	FILE *f;

	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");

	if (!f)
		return -ENOENT;

	/* In my local setup, the number of entries is 50k+ so Let us initially
	 * allocate space to hold 64k entries. If 64k is not enough, incrementally
	 * increase 1k each time.
	 */
	max_cnt = 65536;
	inc_cnt = 1024;
	addrs = malloc(max_cnt * sizeof(long));
	if (addrs == NULL) {
		err = -ENOMEM;
		goto error;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;

		free(name);
		if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
			continue;
		if (skip_entry(name))
			continue;

		if (cnt == max_cnt) {
			max_cnt += inc_cnt;
			tmp_addrs = realloc(addrs, max_cnt);
			if (!tmp_addrs) {
				err = -ENOMEM;
				goto error;
			}
			addrs = tmp_addrs;
		}

		addrs[cnt++] = (unsigned long)addr;
	}

	*addrsp = addrs;
	*cntp = cnt;

error:
	free(name);
	fclose(f);
	if (err)
		free(addrs);
	return err;
}

static void do_bench_test(struct kprobe_multi_empty *skel, struct bpf_kprobe_multi_opts *opts)
{
	long attach_start_ns, attach_end_ns;
	long detach_start_ns, detach_end_ns;
	double attach_delta, detach_delta;
	struct bpf_link *link = NULL;

	attach_start_ns = get_time_ns();
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_empty,
						     NULL, opts);
	attach_end_ns = get_time_ns();

	if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
		return;

	detach_start_ns = get_time_ns();
	bpf_link__destroy(link);
	detach_end_ns = get_time_ns();

	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;

	printf("%s: found %lu functions\n", __func__, opts->cnt);
	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}

static void test_kprobe_multi_bench_attach(bool kernel)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct kprobe_multi_empty *skel = NULL;
	long attach_start_ns, attach_end_ns;
	long detach_start_ns, detach_end_ns;
	double attach_delta, detach_delta;
	struct bpf_link *link = NULL;
	char **syms = NULL;
	size_t cnt = 0, i;
	size_t cnt = 0;

	if (!ASSERT_OK(get_syms(&syms, &cnt, kernel), "get_syms"))
		return;
@@ -447,32 +584,43 @@ static void test_kprobe_multi_bench_attach(bool kernel)
	opts.syms = (const char **) syms;
	opts.cnt = cnt;

	attach_start_ns = get_time_ns();
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_empty,
						     NULL, &opts);
	attach_end_ns = get_time_ns();

	if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	detach_start_ns = get_time_ns();
	bpf_link__destroy(link);
	detach_end_ns = get_time_ns();

	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;

	printf("%s: found %lu functions\n", __func__, cnt);
	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
	do_bench_test(skel, &opts);

cleanup:
	kprobe_multi_empty__destroy(skel);
	if (syms) {
		for (i = 0; i < cnt; i++)
			free(syms[i]);
	if (syms)
		free(syms);
}

static void test_kprobe_multi_bench_attach_addr(bool kernel)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct kprobe_multi_empty *skel = NULL;
	unsigned long *addrs = NULL;
	size_t cnt = 0;
	int err;

	err = get_addrs(&addrs, &cnt, kernel);
	if (err == -ENOENT) {
		test__skip();
		return;
	}

	if (!ASSERT_OK(err, "get_addrs"))
		return;

	skel = kprobe_multi_empty__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
		goto cleanup;

	opts.addrs = addrs;
	opts.cnt = cnt;

	do_bench_test(skel, &opts);

cleanup:
	kprobe_multi_empty__destroy(skel);
	free(addrs);
}

static void test_attach_override(void)
@@ -515,6 +663,10 @@ void serial_test_kprobe_multi_bench_attach(void)
		test_kprobe_multi_bench_attach(true);
	if (test__start_subtest("modules"))
		test_kprobe_multi_bench_attach(false);
	if (test__start_subtest("kernel"))
		test_kprobe_multi_bench_attach_addr(true);
	if (test__start_subtest("modules"))
		test_kprobe_multi_bench_attach_addr(false);
}

void test_kprobe_multi_test(void)
tools/testing/selftests/bpf/prog_tests/ksyms.c
@@ -5,8 +5,6 @@
#include "test_ksyms.skel.h"
#include <sys/stat.h>

static int duration;

void test_ksyms(void)
{
	const char *btf_path = "/sys/kernel/btf/vmlinux";
@@ -18,43 +16,37 @@ void test_ksyms(void)
	int err;

	err = kallsyms_find("bpf_link_fops", &link_fops_addr);
	if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
	if (!ASSERT_NEQ(err, -EINVAL, "bpf_link_fops: kallsyms_fopen"))
		return;
	if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_link_fops' not found\n"))
	if (!ASSERT_NEQ(err, -ENOENT, "bpf_link_fops: ksym_find"))
		return;

	err = kallsyms_find("__per_cpu_start", &per_cpu_start_addr);
	if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
	if (!ASSERT_NEQ(err, -EINVAL, "__per_cpu_start: kallsyms_fopen"))
		return;
	if (CHECK(err == -ENOENT, "ksym_find", "symbol 'per_cpu_start' not found\n"))
	if (!ASSERT_NEQ(err, -ENOENT, "__per_cpu_start: ksym_find"))
		return;

	if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno))
	if (!ASSERT_OK(stat(btf_path, &st), "stat_btf"))
		return;
	btf_size = st.st_size;

	skel = test_ksyms__open_and_load();
	if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
	if (!ASSERT_OK_PTR(skel, "test_ksyms__open_and_load"))
		return;

	err = test_ksyms__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
	if (!ASSERT_OK(err, "test_ksyms__attach"))
		goto cleanup;

	/* trigger tracepoint */
	usleep(1);

	data = skel->data;
	CHECK(data->out__bpf_link_fops != link_fops_addr, "bpf_link_fops",
	      "got 0x%llx, exp 0x%llx\n",
	      data->out__bpf_link_fops, link_fops_addr);
	CHECK(data->out__bpf_link_fops1 != 0, "bpf_link_fops1",
	      "got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0);
	CHECK(data->out__btf_size != btf_size, "btf_size",
	      "got %llu, exp %llu\n", data->out__btf_size, btf_size);
	CHECK(data->out__per_cpu_start != per_cpu_start_addr, "__per_cpu_start",
	      "got %llu, exp %llu\n", data->out__per_cpu_start,
	      per_cpu_start_addr);
	ASSERT_EQ(data->out__bpf_link_fops, link_fops_addr, "bpf_link_fops");
	ASSERT_EQ(data->out__bpf_link_fops1, 0, "bpf_link_fops1");
	ASSERT_EQ(data->out__btf_size, btf_size, "btf_size");
	ASSERT_EQ(data->out__per_cpu_start, per_cpu_start_addr, "__per_cpu_start");

cleanup:
	test_ksyms__destroy(skel);
tools/testing/selftests/bpf/trace_helpers.c
@@ -61,12 +61,7 @@ void free_kallsyms_local(struct ksyms *ksyms)
	free(ksyms);
}

static int ksym_cmp(const void *p1, const void *p2)
{
	return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}

struct ksyms *load_kallsyms_local(void)
static struct ksyms *load_kallsyms_local_common(ksym_cmp_t cmp_cb)
{
	FILE *f;
	char func[256], buf[256];
@@ -100,7 +95,7 @@ struct ksyms *load_kallsyms_local(void)
			goto error;
	}
	fclose(f);
	qsort(ksyms->syms, ksyms->sym_cnt, sizeof(struct ksym), ksym_cmp);
	qsort(ksyms->syms, ksyms->sym_cnt, sizeof(struct ksym), cmp_cb);
	return ksyms;

error:
@@ -109,6 +104,21 @@ error:
	return NULL;
}

static int ksym_cmp(const void *p1, const void *p2)
{
	return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}

struct ksyms *load_kallsyms_local(void)
{
	return load_kallsyms_local_common(ksym_cmp);
}

struct ksyms *load_kallsyms_custom_local(ksym_cmp_t cmp_cb)
{
	return load_kallsyms_local_common(cmp_cb);
}

int load_kallsyms(void)
{
	pthread_mutex_lock(&ksyms_mutex);
@@ -148,6 +158,28 @@ struct ksym *ksym_search_local(struct ksyms *ksyms, long key)
	return &ksyms->syms[0];
}

struct ksym *search_kallsyms_custom_local(struct ksyms *ksyms, const void *p,
					  ksym_search_cmp_t cmp_cb)
{
	int start = 0, mid, end = ksyms->sym_cnt;
	struct ksym *ks;
	int result;

	while (start < end) {
		mid = start + (end - start) / 2;
		ks = &ksyms->syms[mid];
		result = cmp_cb(p, ks);
		if (result < 0)
			end = mid;
		else if (result > 0)
			start = mid + 1;
		else
			return ks;
	}

	return NULL;
}

struct ksym *ksym_search(long key)
{
	if (!ksyms)
tools/testing/selftests/bpf/trace_helpers.h
@@ -13,6 +13,9 @@ struct ksym {
};
struct ksyms;

typedef int (*ksym_cmp_t)(const void *p1, const void *p2);
typedef int (*ksym_search_cmp_t)(const void *p1, const struct ksym *p2);

int load_kallsyms(void);
struct ksym *ksym_search(long key);
long ksym_get_addr(const char *name);
@@ -22,6 +25,10 @@ struct ksym *ksym_search_local(struct ksyms *ksyms, long key);
long ksym_get_addr_local(struct ksyms *ksyms, const char *name);
void free_kallsyms_local(struct ksyms *ksyms);

struct ksyms *load_kallsyms_custom_local(ksym_cmp_t cmp_cb);
struct ksym *search_kallsyms_custom_local(struct ksyms *ksyms, const void *p1,
					  ksym_search_cmp_t cmp_cb);

/* open kallsyms and find addresses on the fly, faster than load + search. */
int kallsyms_find(const char *sym, unsigned long long *addr);