Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-12-03

The main changes are:

1) Support BTF in kernel modules, from Andrii.

2) Introduce preferred busy-polling, from Björn.

3) bpf_ima_inode_hash() and bpf_bprm_opts_set() helpers, from KP Singh.

4) Memcg-based memory accounting for bpf objects, from Roman.

5) Allow bpf_{s,g}etsockopt from cgroup bind{4,6} hooks, from Stanislav.
====================

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (118 commits)
  selftests/bpf: Fix invalid use of strncat in test_sockmap
  libbpf: Use memcpy instead of strncpy to please GCC
  selftests/bpf: Add fentry/fexit/fmod_ret selftest for kernel module
  selftests/bpf: Add tp_btf CO-RE reloc test for modules
  libbpf: Support attachment of BPF tracing programs to kernel modules
  libbpf: Factor out low-level BPF program loading helper
  bpf: Allow to specify kernel module BTFs when attaching BPF programs
  bpf: Remove hard-coded btf_vmlinux assumption from BPF verifier
  selftests/bpf: Add CO-RE relocs selftest relying on kernel module BTF
  selftests/bpf: Add support for marking sub-tests as skipped
  selftests/bpf: Add bpf_testmod kernel module for testing
  libbpf: Add kernel module BTF support for CO-RE relocations
  libbpf: Refactor CO-RE relocs to not assume a single BTF object
  libbpf: Add internal helper to load BTF data by FD
  bpf: Keep module's btf_data_size intact after load
  bpf: Fix bpf_put_raw_tracepoint()'s use of __module_address()
  selftests/bpf: Add Userspace tests for TCP_WINDOW_CLAMP
  bpf: Adds support for setting window clamp
  samples/bpf: Fix spelling mistake "recieving" -> "receiving"
  bpf: Fix cold build of test_progs-no_alu32
  ...
====================

Link: https://lore.kernel.org/r/20201204021936.85653-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
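For context on item 1), the headline libbpf change in this series is that BPF tracing programs can now attach to functions exported by kernel modules, not only to vmlinux. A minimal BPF-side sketch of what that enables is shown below; it assumes a CO-RE build with vmlinux.h and targets bpf_testmod_test_read() from the bpf_testmod selftest module added later in this series (the function name and argument list are taken from the selftests and are illustrative only):

/* Sketch only: attach an fexit program to a function exported by the
 * bpf_testmod kernel module. With module BTF support, libbpf resolves the
 * target BTF type ID in the module's BTF instead of vmlinux's.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("fexit/bpf_testmod_test_read")
int BPF_PROG(handle_testmod_read,
             struct file *file, struct kobject *kobj,
             struct bin_attribute *bin_attr, char *buf,
             loff_t off, size_t len)
{
        /* just prove we got called; len is the last traced argument */
        bpf_printk("bpf_testmod_test_read called, len=%lu", len);
        return 0;
}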
@@ -176,6 +176,8 @@ enum kern_feature_id {
        FEAT_PROBE_READ_KERN,
        /* BPF_PROG_BIND_MAP is supported */
        FEAT_PROG_BIND_MAP,
+       /* Kernel support for module BTFs */
+       FEAT_MODULE_BTF,
        __FEAT_CNT,
 };
 
@@ -276,6 +278,7 @@ struct bpf_program {
        enum bpf_prog_type type;
        enum bpf_attach_type expected_attach_type;
        int prog_ifindex;
+       __u32 attach_btf_obj_fd;
        __u32 attach_btf_id;
        __u32 attach_prog_fd;
        void *func_info;
@@ -402,6 +405,13 @@ struct extern_desc {
 
 static LIST_HEAD(bpf_objects_list);
 
+struct module_btf {
+       struct btf *btf;
+       char *name;
+       __u32 id;
+       int fd;
+};
+
 struct bpf_object {
        char name[BPF_OBJ_NAME_LEN];
        char license[64];
@@ -462,11 +472,19 @@ struct bpf_object {
        struct list_head list;
 
        struct btf *btf;
+       struct btf_ext *btf_ext;
+
        /* Parse and load BTF vmlinux if any of the programs in the object need
         * it at load time.
         */
        struct btf *btf_vmlinux;
-       struct btf_ext *btf_ext;
+       /* vmlinux BTF override for CO-RE relocations */
+       struct btf *btf_vmlinux_override;
+       /* Lazily initialized kernel module BTFs */
+       struct module_btf *btf_modules;
+       bool btf_modules_loaded;
+       size_t btf_module_cnt;
+       size_t btf_module_cap;
 
        void *priv;
        bpf_object_clear_priv_t clear_priv;
@@ -3960,6 +3978,35 @@ static int probe_prog_bind_map(void)
        return ret >= 0;
 }
 
+static int probe_module_btf(void)
+{
+       static const char strs[] = "\0int";
+       __u32 types[] = {
+               /* int */
+               BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+       };
+       struct bpf_btf_info info;
+       __u32 len = sizeof(info);
+       char name[16];
+       int fd, err;
+
+       fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
+       if (fd < 0)
+               return 0; /* BTF not supported at all */
+
+       memset(&info, 0, sizeof(info));
+       info.name = ptr_to_u64(name);
+       info.name_len = sizeof(name);
+
+       /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
+        * kernel's module BTF support coincides with support for
+        * name/name_len fields in struct bpf_btf_info.
+        */
+       err = bpf_obj_get_info_by_fd(fd, &info, &len);
+       close(fd);
+       return !err;
+}
+
 enum kern_feature_result {
        FEAT_UNKNOWN = 0,
        FEAT_SUPPORTED = 1,
@@ -4003,7 +4050,10 @@ static struct kern_feature_desc {
        },
        [FEAT_PROG_BIND_MAP] = {
                "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
-       }
+       },
+       [FEAT_MODULE_BTF] = {
+               "module BTF support", probe_module_btf,
+       },
 };
 
 static bool kernel_supports(enum kern_feature_id feat_id)
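As a rough illustration of what this feature probe checks, here is a hedged, stand-alone sketch that performs the same detection from application code using only public libbpf APIs (btf__new_empty()/btf__add_int()/btf__load() stand in for the internal libbpf__load_raw_btf() helper; error handling is abbreviated and the function name is made up):

/* Sketch: detect kernel module BTF support the way libbpf's FEAT_MODULE_BTF
 * probe does - load a trivial BTF object, then see whether
 * BPF_OBJ_GET_INFO_BY_FD accepts the name/name_len fields of bpf_btf_info.
 */
#include <string.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>

int probe_module_btf_from_userspace(void)
{
        struct bpf_btf_info info;
        __u32 len = sizeof(info);
        char name[16];
        struct btf *btf;
        int err;

        btf = btf__new_empty();
        if (libbpf_get_error(btf))
                return 0;

        btf__add_int(btf, "int", 4, BTF_INT_SIGNED);    /* [1] int */
        if (btf__load(btf)) {                           /* BTF not supported at all */
                btf__free(btf);
                return 0;
        }

        memset(&info, 0, sizeof(info));
        info.name = (__u64)(unsigned long)name;
        info.name_len = sizeof(name);

        /* only kernels that understand named BTF objects (which coincides
         * with module BTF support) accept this request
         */
        err = bpf_obj_get_info_by_fd(btf__fd(btf), &info, &len);
        btf__free(btf);
        return !err;
}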
@@ -4603,46 +4653,43 @@ static size_t bpf_core_essential_name_len(const char *name)
        return n;
 }
 
-/* dynamically sized list of type IDs */
-struct ids_vec {
-       __u32 *data;
+struct core_cand
+{
+       const struct btf *btf;
+       const struct btf_type *t;
+       const char *name;
+       __u32 id;
+};
+
+/* dynamically sized list of type IDs and its associated struct btf */
+struct core_cand_list {
+       struct core_cand *cands;
        int len;
 };
 
-static void bpf_core_free_cands(struct ids_vec *cand_ids)
+static void bpf_core_free_cands(struct core_cand_list *cands)
 {
-       free(cand_ids->data);
-       free(cand_ids);
+       free(cands->cands);
+       free(cands);
 }
 
-static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
-                                          __u32 local_type_id,
-                                          const struct btf *targ_btf)
+static int bpf_core_add_cands(struct core_cand *local_cand,
+                             size_t local_essent_len,
+                             const struct btf *targ_btf,
+                             const char *targ_btf_name,
+                             int targ_start_id,
+                             struct core_cand_list *cands)
 {
-       size_t local_essent_len, targ_essent_len;
-       const char *local_name, *targ_name;
-       const struct btf_type *t, *local_t;
-       struct ids_vec *cand_ids;
-       __u32 *new_ids;
-       int i, err, n;
-
-       local_t = btf__type_by_id(local_btf, local_type_id);
-       if (!local_t)
-               return ERR_PTR(-EINVAL);
-
-       local_name = btf__name_by_offset(local_btf, local_t->name_off);
-       if (str_is_empty(local_name))
-               return ERR_PTR(-EINVAL);
-       local_essent_len = bpf_core_essential_name_len(local_name);
-
-       cand_ids = calloc(1, sizeof(*cand_ids));
-       if (!cand_ids)
-               return ERR_PTR(-ENOMEM);
+       struct core_cand *new_cands, *cand;
+       const struct btf_type *t;
+       const char *targ_name;
+       size_t targ_essent_len;
+       int n, i;
 
        n = btf__get_nr_types(targ_btf);
-       for (i = 1; i <= n; i++) {
+       for (i = targ_start_id; i <= n; i++) {
                t = btf__type_by_id(targ_btf, i);
-               if (btf_kind(t) != btf_kind(local_t))
+               if (btf_kind(t) != btf_kind(local_cand->t))
                        continue;
 
                targ_name = btf__name_by_offset(targ_btf, t->name_off);
@@ -4653,24 +4700,174 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
                if (targ_essent_len != local_essent_len)
                        continue;
 
-               if (strncmp(local_name, targ_name, local_essent_len) == 0) {
-                       pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n",
-                                local_type_id, btf_kind_str(local_t),
-                                local_name, i, btf_kind_str(t), targ_name);
-                       new_ids = libbpf_reallocarray(cand_ids->data,
-                                                     cand_ids->len + 1,
-                                                     sizeof(*cand_ids->data));
-                       if (!new_ids) {
-                               err = -ENOMEM;
-                               goto err_out;
-                       }
-                       cand_ids->data = new_ids;
-                       cand_ids->data[cand_ids->len++] = i;
-               }
+               if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
+                       continue;
+
+               pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
+                        local_cand->id, btf_kind_str(local_cand->t),
+                        local_cand->name, i, btf_kind_str(t), targ_name,
+                        targ_btf_name);
+               new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
+                                               sizeof(*cands->cands));
+               if (!new_cands)
+                       return -ENOMEM;
+
+               cand = &new_cands[cands->len];
+               cand->btf = targ_btf;
+               cand->t = t;
+               cand->name = targ_name;
+               cand->id = i;
+
+               cands->cands = new_cands;
+               cands->len++;
        }
-       return cand_ids;
+       return 0;
+}
+
+static int load_module_btfs(struct bpf_object *obj)
+{
+       struct bpf_btf_info info;
+       struct module_btf *mod_btf;
+       struct btf *btf;
+       char name[64];
+       __u32 id = 0, len;
+       int err, fd;
+
+       if (obj->btf_modules_loaded)
+               return 0;
+
+       /* don't do this again, even if we find no module BTFs */
+       obj->btf_modules_loaded = true;
+
+       /* kernel too old to support module BTFs */
+       if (!kernel_supports(FEAT_MODULE_BTF))
+               return 0;
+
+       while (true) {
+               err = bpf_btf_get_next_id(id, &id);
+               if (err && errno == ENOENT)
+                       return 0;
+               if (err) {
+                       err = -errno;
+                       pr_warn("failed to iterate BTF objects: %d\n", err);
+                       return err;
+               }
+
+               fd = bpf_btf_get_fd_by_id(id);
+               if (fd < 0) {
+                       if (errno == ENOENT)
+                               continue; /* expected race: BTF was unloaded */
+                       err = -errno;
+                       pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
+                       return err;
+               }
+
+               len = sizeof(info);
+               memset(&info, 0, sizeof(info));
+               info.name = ptr_to_u64(name);
+               info.name_len = sizeof(name);
+
+               err = bpf_obj_get_info_by_fd(fd, &info, &len);
+               if (err) {
+                       err = -errno;
+                       pr_warn("failed to get BTF object #%d info: %d\n", id, err);
+                       goto err_out;
+               }
+
+               /* ignore non-module BTFs */
+               if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
+                       close(fd);
+                       continue;
+               }
+
+               btf = btf_get_from_fd(fd, obj->btf_vmlinux);
+               if (IS_ERR(btf)) {
+                       pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n",
+                               name, id, PTR_ERR(btf));
+                       err = PTR_ERR(btf);
+                       goto err_out;
+               }
+
+               err = btf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
+                                    sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
+               if (err)
+                       goto err_out;
+
+               mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
+
+               mod_btf->btf = btf;
+               mod_btf->id = id;
+               mod_btf->fd = fd;
+               mod_btf->name = strdup(name);
+               if (!mod_btf->name) {
+                       err = -ENOMEM;
+                       goto err_out;
+               }
+               continue;
+
 err_out:
-       bpf_core_free_cands(cand_ids);
+               close(fd);
+               return err;
+       }
+
+       return 0;
+}
+
+static struct core_cand_list *
+bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
+{
+       struct core_cand local_cand = {};
+       struct core_cand_list *cands;
+       const struct btf *main_btf;
+       size_t local_essent_len;
+       int err, i;
+
+       local_cand.btf = local_btf;
+       local_cand.t = btf__type_by_id(local_btf, local_type_id);
+       if (!local_cand.t)
+               return ERR_PTR(-EINVAL);
+
+       local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
+       if (str_is_empty(local_cand.name))
+               return ERR_PTR(-EINVAL);
+       local_essent_len = bpf_core_essential_name_len(local_cand.name);
+
+       cands = calloc(1, sizeof(*cands));
+       if (!cands)
+               return ERR_PTR(-ENOMEM);
+
+       /* Attempt to find target candidates in vmlinux BTF first */
+       main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
+       err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
+       if (err)
+               goto err_out;
+
+       /* if vmlinux BTF has any candidate, don't got for module BTFs */
+       if (cands->len)
+               return cands;
+
+       /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
+       if (obj->btf_vmlinux_override)
+               return cands;
+
+       /* now look through module BTFs, trying to still find candidates */
+       err = load_module_btfs(obj);
+       if (err)
+               goto err_out;
+
+       for (i = 0; i < obj->btf_module_cnt; i++) {
+               err = bpf_core_add_cands(&local_cand, local_essent_len,
+                                        obj->btf_modules[i].btf,
+                                        obj->btf_modules[i].name,
+                                        btf__get_nr_types(obj->btf_vmlinux) + 1,
+                                        cands);
+               if (err)
+                       goto err_out;
+       }
+
+       return cands;
+err_out:
+       bpf_core_free_cands(cands);
        return ERR_PTR(err);
 }
 
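The iteration pattern used by load_module_btfs() above can also be used directly from an application. A hedged sketch that simply lists the module BTF objects currently known to the kernel (requires a kernel with module BTF support and sufficient privileges; the function name is made up):

/* Sketch: enumerate kernel BTF objects and print the ones that belong to
 * modules, mirroring what load_module_btfs() does inside libbpf.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>

int list_module_btfs(void)
{
        struct bpf_btf_info info;
        char name[64];
        __u32 id = 0, len;
        int fd, err;

        for (;;) {
                err = bpf_btf_get_next_id(id, &id);
                if (err) {
                        if (errno == ENOENT)
                                return 0;       /* no more BTF objects */
                        return -errno;
                }

                fd = bpf_btf_get_fd_by_id(id);
                if (fd < 0) {
                        if (errno == ENOENT)
                                continue;       /* raced with module unload */
                        return -errno;
                }

                len = sizeof(info);
                memset(&info, 0, sizeof(info));
                info.name = (__u64)(unsigned long)name;
                info.name_len = sizeof(name);

                err = bpf_obj_get_info_by_fd(fd, &info, &len);
                close(fd);
                if (err)
                        return -errno;

                /* kernel_btf is set for both vmlinux and module BTFs */
                if (info.kernel_btf && strcmp(name, "vmlinux") != 0)
                        printf("module BTF: id=%u name=%s\n", id, name);
        }
}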
@@ -5664,7 +5861,6 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
                               const struct bpf_core_relo *relo,
                               int relo_idx,
                               const struct btf *local_btf,
-                              const struct btf *targ_btf,
                               struct hashmap *cand_cache)
 {
        struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
@@ -5672,8 +5868,8 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
        struct bpf_core_relo_res cand_res, targ_res;
        const struct btf_type *local_type;
        const char *local_name;
-       struct ids_vec *cand_ids;
-       __u32 local_id, cand_id;
+       struct core_cand_list *cands = NULL;
+       __u32 local_id;
        const char *spec_str;
        int i, j, err;
 
@@ -5720,24 +5916,24 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
                return -EOPNOTSUPP;
        }
 
-       if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
-               cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
-               if (IS_ERR(cand_ids)) {
-                       pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld",
+       if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
+               cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
+               if (IS_ERR(cands)) {
+                       pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
                                prog->name, relo_idx, local_id, btf_kind_str(local_type),
-                               local_name, PTR_ERR(cand_ids));
-                       return PTR_ERR(cand_ids);
+                               local_name, PTR_ERR(cands));
+                       return PTR_ERR(cands);
                }
-               err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
+               err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
                if (err) {
-                       bpf_core_free_cands(cand_ids);
+                       bpf_core_free_cands(cands);
                        return err;
                }
        }
 
-       for (i = 0, j = 0; i < cand_ids->len; i++) {
-               cand_id = cand_ids->data[i];
-               err = bpf_core_spec_match(&local_spec, targ_btf, cand_id, &cand_spec);
+       for (i = 0, j = 0; i < cands->len; i++) {
+               err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
+                                         cands->cands[i].id, &cand_spec);
                if (err < 0) {
                        pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
                                prog->name, relo_idx, i);
@@ -5781,7 +5977,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
                        return -EINVAL;
                }
 
-               cand_ids->data[j++] = cand_spec.root_type_id;
+               cands->cands[j++] = cands->cands[i];
        }
 
        /*
@@ -5793,7 +5989,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
         * depending on relo's kind.
         */
        if (j > 0)
-               cand_ids->len = j;
+               cands->len = j;
 
        /*
         * If no candidates were found, it might be both a programmer error,
@@ -5837,20 +6033,19 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
        struct hashmap_entry *entry;
        struct hashmap *cand_cache = NULL;
        struct bpf_program *prog;
-       struct btf *targ_btf;
        const char *sec_name;
        int i, err = 0, insn_idx, sec_idx;
 
        if (obj->btf_ext->core_relo_info.len == 0)
                return 0;
 
-       if (targ_btf_path)
-               targ_btf = btf__parse(targ_btf_path, NULL);
-       else
-               targ_btf = obj->btf_vmlinux;
-       if (IS_ERR_OR_NULL(targ_btf)) {
-               pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
-               return PTR_ERR(targ_btf);
+       if (targ_btf_path) {
+               obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
+               if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) {
+                       err = PTR_ERR(obj->btf_vmlinux_override);
+                       pr_warn("failed to parse target BTF: %d\n", err);
+                       return err;
+               }
        }
 
        cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
@@ -5902,8 +6097,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
                        if (!prog->load)
                                continue;
 
-                       err = bpf_core_apply_relo(prog, rec, i, obj->btf,
-                                                 targ_btf, cand_cache);
+                       err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
                        if (err) {
                                pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
                                        prog->name, i, err);
@@ -5913,9 +6107,10 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
        }
 
 out:
-       /* obj->btf_vmlinux is freed at the end of object load phase */
-       if (targ_btf != obj->btf_vmlinux)
-               btf__free(targ_btf);
+       /* obj->btf_vmlinux and module BTFs are freed after object load */
+       btf__free(obj->btf_vmlinux_override);
+       obj->btf_vmlinux_override = NULL;
+
        if (!IS_ERR_OR_NULL(cand_cache)) {
                hashmap__for_each_entry(cand_cache, entry, i) {
                        bpf_core_free_cands(entry->value);
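For completeness, the targ_btf_path override that this code now stashes in obj->btf_vmlinux_override is supplied by the caller through bpf_object_load_attr. A hedged usage sketch (the .btf path and function name are hypothetical):

/* Sketch: load an object against a custom target BTF (e.g. one produced by
 * pahole for a different kernel build) instead of the running kernel's
 * /sys/kernel/btf/vmlinux. As bpf_core_find_cands() above shows, module BTFs
 * are intentionally skipped while an override is in effect.
 */
#include <bpf/libbpf.h>

int load_with_custom_btf(struct bpf_object *obj)
{
        struct bpf_object_load_attr load_attr = {
                .obj = obj,
                .target_btf_path = "/tmp/other-kernel-vmlinux.btf",    /* hypothetical path */
        };

        return bpf_object__load_xattr(&load_attr);
}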
@@ -6626,16 +6821,25 @@ static int
 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
             char *license, __u32 kern_version, int *pfd)
 {
-       struct bpf_load_program_attr load_attr;
+       struct bpf_prog_load_params load_attr = {};
        char *cp, errmsg[STRERR_BUFSIZE];
        size_t log_buf_size = 0;
        char *log_buf = NULL;
        int btf_fd, ret;
 
+       if (prog->type == BPF_PROG_TYPE_UNSPEC) {
+               /*
+                * The program type must be set. Most likely we couldn't find a proper
+                * section definition at load time, and thus we didn't infer the type.
+                */
+               pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
+                       prog->name, prog->sec_name);
+               return -EINVAL;
+       }
+
        if (!insns || !insns_cnt)
                return -EINVAL;
 
-       memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
        load_attr.prog_type = prog->type;
        /* old kernels might not support specifying expected_attach_type */
        if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
@@ -6646,19 +6850,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
        if (kernel_supports(FEAT_PROG_NAME))
                load_attr.name = prog->name;
        load_attr.insns = insns;
-       load_attr.insns_cnt = insns_cnt;
+       load_attr.insn_cnt = insns_cnt;
        load_attr.license = license;
-       if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
-           prog->type == BPF_PROG_TYPE_LSM) {
-               load_attr.attach_btf_id = prog->attach_btf_id;
-       } else if (prog->type == BPF_PROG_TYPE_TRACING ||
-                  prog->type == BPF_PROG_TYPE_EXT) {
+       load_attr.attach_btf_id = prog->attach_btf_id;
+       if (prog->attach_prog_fd)
                load_attr.attach_prog_fd = prog->attach_prog_fd;
-               load_attr.attach_btf_id = prog->attach_btf_id;
-       } else {
-               load_attr.kern_version = kern_version;
-               load_attr.prog_ifindex = prog->prog_ifindex;
-       }
+       else
+               load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
+       load_attr.attach_btf_id = prog->attach_btf_id;
+       load_attr.kern_version = kern_version;
+       load_attr.prog_ifindex = prog->prog_ifindex;
+
        /* specify func_info/line_info only if kernel supports them */
        btf_fd = bpf_object__btf_fd(prog->obj);
        if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
@@ -6682,7 +6884,9 @@ retry_load:
                *log_buf = 0;
        }
 
-       ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
+       load_attr.log_buf = log_buf;
+       load_attr.log_buf_sz = log_buf_size;
+       ret = libbpf__bpf_prog_load(&load_attr);
 
        if (ret >= 0) {
                if (log_buf && load_attr.log_level)
@@ -6723,9 +6927,9 @@ retry_load:
                pr_warn("-- BEGIN DUMP LOG ---\n");
                pr_warn("\n%s\n", log_buf);
                pr_warn("-- END LOG --\n");
-       } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
+       } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
                pr_warn("Program too large (%zu insns), at most %d insns\n",
-                       load_attr.insns_cnt, BPF_MAXINSNS);
+                       load_attr.insn_cnt, BPF_MAXINSNS);
                ret = -LIBBPF_ERRNO__PROG2BIG;
        } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
                /* Wrong program type? */
@@ -6733,7 +6937,9 @@ retry_load:
 
                load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
                load_attr.expected_attach_type = 0;
-               fd = bpf_load_program_xattr(&load_attr, NULL, 0);
+               load_attr.log_buf = NULL;
+               load_attr.log_buf_sz = 0;
+               fd = libbpf__bpf_prog_load(&load_attr);
                if (fd >= 0) {
                        close(fd);
                        ret = -LIBBPF_ERRNO__PROGTYPE;
@@ -6746,11 +6952,11 @@ out:
        return ret;
 }
 
-static int libbpf_find_attach_btf_id(struct bpf_program *prog);
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
 
 int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
 {
-       int err = 0, fd, i, btf_id;
+       int err = 0, fd, i;
 
        if (prog->obj->loaded) {
                pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
@@ -6760,10 +6966,14 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
        if ((prog->type == BPF_PROG_TYPE_TRACING ||
             prog->type == BPF_PROG_TYPE_LSM ||
             prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
-               btf_id = libbpf_find_attach_btf_id(prog);
-               if (btf_id <= 0)
-                       return btf_id;
-               prog->attach_btf_id = btf_id;
+               int btf_obj_fd = 0, btf_type_id = 0;
+
+               err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
+               if (err)
+                       return err;
+
+               prog->attach_btf_obj_fd = btf_obj_fd;
+               prog->attach_btf_id = btf_type_id;
        }
 
        if (prog->instances.nr < 0 || !prog->instances.fds) {
@@ -6923,9 +7133,12 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 
        bpf_object__for_each_program(prog, obj) {
                prog->sec_def = find_sec_def(prog->sec_name);
-               if (!prog->sec_def)
+               if (!prog->sec_def) {
                        /* couldn't guess, but user might manually specify */
+                       pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
+                                prog->name, prog->sec_name);
                        continue;
+               }
 
                if (prog->sec_def->is_sleepable)
                        prog->prog_flags |= BPF_F_SLEEPABLE;
@@ -7271,6 +7484,15 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
        err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
        err = err ? : bpf_object__load_progs(obj, attr->log_level);
 
+       /* clean up module BTFs */
+       for (i = 0; i < obj->btf_module_cnt; i++) {
+               close(obj->btf_modules[i].fd);
+               btf__free(obj->btf_modules[i].btf);
+               free(obj->btf_modules[i].name);
+       }
+       free(obj->btf_modules);
+
+       /* clean up vmlinux BTF */
        btf__free(obj->btf_vmlinux);
        obj->btf_vmlinux = NULL;
 
@@ -7649,6 +7871,16 @@ bool bpf_map__is_pinned(const struct bpf_map *map)
        return map->pinned;
 }
 
+static void sanitize_pin_path(char *s)
+{
+       /* bpffs disallows periods in path names */
+       while (*s) {
+               if (*s == '.')
+                       *s = '_';
+               s++;
+       }
+}
+
 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
 {
        struct bpf_map *map;
@@ -7678,6 +7910,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
                                err = -ENAMETOOLONG;
                                goto err_unpin_maps;
                        }
+                       sanitize_pin_path(buf);
                        pin_path = buf;
                } else if (!map->pin_path) {
                        continue;
@@ -7722,6 +7955,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
                                return -EINVAL;
                        else if (len >= PATH_MAX)
                                return -ENAMETOOLONG;
+                       sanitize_pin_path(buf);
                        pin_path = buf;
                } else if (!map->pin_path) {
                        continue;
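From the caller's point of view, this sanitization matters mostly for libbpf's internal maps, whose generated names contain a period (for example something like "myobj.rodata"), which bpffs would otherwise reject. A hedged usage sketch (object file name and helper name are made up):

/* Sketch: pin all maps of an object under /sys/fs/bpf. With
 * sanitize_pin_path(), an internal map such as "myobj.rodata" is pinned as
 * /sys/fs/bpf/myobj_rodata instead of failing on the '.' character.
 */
#include <bpf/libbpf.h>

int pin_all_maps(void)
{
        struct bpf_object *obj;
        int err;

        obj = bpf_object__open_file("prog.bpf.o", NULL);
        if (libbpf_get_error(obj))
                return -1;

        err = bpf_object__load(obj);
        if (!err)
                err = bpf_object__pin_maps(obj, "/sys/fs/bpf");

        bpf_object__close(obj);
        return err;
}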
@@ -8607,8 +8841,8 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
        return btf__find_by_name_kind(btf, btf_type_name, kind);
 }
 
-static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
-                                       enum bpf_attach_type attach_type)
+static inline int find_attach_btf_id(struct btf *btf, const char *name,
+                                    enum bpf_attach_type attach_type)
 {
        int err;
 
@@ -8624,9 +8858,6 @@ static inline int find_attach_btf_id(struct btf *btf, const char *name,
        else
                err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
 
-       if (err <= 0)
-               pr_warn("%s is not found in vmlinux BTF\n", name);
-
        return err;
 }
 
@@ -8642,7 +8873,10 @@ int libbpf_find_vmlinux_btf_id(const char *name,
                return -EINVAL;
        }
 
-       err = __find_vmlinux_btf_id(btf, name, attach_type);
+       err = find_attach_btf_id(btf, name, attach_type);
+       if (err <= 0)
+               pr_warn("%s is not found in vmlinux BTF\n", name);
+
        btf__free(btf);
        return err;
 }
@@ -8680,11 +8914,49 @@ out:
        return err;
 }
 
-static int libbpf_find_attach_btf_id(struct bpf_program *prog)
+static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
+                             enum bpf_attach_type attach_type,
+                             int *btf_obj_fd, int *btf_type_id)
 {
+       int ret, i;
+
+       ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
+       if (ret > 0) {
+               *btf_obj_fd = 0; /* vmlinux BTF */
+               *btf_type_id = ret;
+               return 0;
+       }
+       if (ret != -ENOENT)
+               return ret;
+
+       ret = load_module_btfs(obj);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < obj->btf_module_cnt; i++) {
+               const struct module_btf *mod = &obj->btf_modules[i];
+
+               ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
+               if (ret > 0) {
+                       *btf_obj_fd = mod->fd;
+                       *btf_type_id = ret;
+                       return 0;
+               }
+               if (ret == -ENOENT)
+                       continue;
+
+               return ret;
+       }
+
+       return -ESRCH;
+}
+
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
+{
        enum bpf_attach_type attach_type = prog->expected_attach_type;
        __u32 attach_prog_fd = prog->attach_prog_fd;
-       const char *name = prog->sec_name;
+       const char *name = prog->sec_name, *attach_name;
+       const struct bpf_sec_def *sec = NULL;
        int i, err;
 
        if (!name)
@@ -8695,17 +8967,37 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog)
                        continue;
                if (strncmp(name, section_defs[i].sec, section_defs[i].len))
                        continue;
-               if (attach_prog_fd)
-                       err = libbpf_find_prog_btf_id(name + section_defs[i].len,
-                                                     attach_prog_fd);
-               else
-                       err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
-                                                   name + section_defs[i].len,
-                                                   attach_type);
+
+               sec = &section_defs[i];
+               break;
+       }
+
+       if (!sec) {
+               pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
+               return -ESRCH;
+       }
+       attach_name = name + sec->len;
+
+       /* BPF program's BTF ID */
+       if (attach_prog_fd) {
+               err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
+               if (err < 0) {
+                       pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
+                               attach_prog_fd, attach_name, err);
+                       return err;
+               }
+               *btf_obj_fd = 0;
+               *btf_type_id = err;
+               return 0;
+       }
+
+       /* kernel/module BTF ID */
+       err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+       if (err) {
+               pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
                return err;
        }
-       pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
-       return -ESRCH;
+       return 0;
 }
 
 int libbpf_attach_type_by_name(const char *name,
@@ -10594,6 +10886,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
                return btf_id;
 
        prog->attach_btf_id = btf_id;
+       prog->attach_btf_obj_fd = 0;
        prog->attach_prog_fd = attach_prog_fd;
        return 0;
 }
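On the API side, bpf_program__set_attach_target() is the public way to retarget a tracing program before the object is loaded; note that, as the hunk above shows, it resets attach_btf_obj_fd to 0 (vmlinux BTF). A hedged usage sketch (program and function names are made up):

/* Sketch: point an fentry program at a different kernel function than the
 * one named in its ELF section. Passing attach_prog_fd = 0 resolves the
 * function name against vmlinux BTF.
 */
#include <errno.h>
#include <bpf/libbpf.h>

int retarget(struct bpf_object *obj)
{
        struct bpf_program *prog;
        int err;

        prog = bpf_object__find_program_by_name(obj, "handle_fentry");    /* hypothetical prog */
        if (!prog)
                return -ENOENT;

        err = bpf_program__set_attach_target(prog, 0 /* attach_prog_fd */,
                                             "security_file_open");       /* example target */
        if (err)
                return err;

        return bpf_object__load(obj);
}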