Merge branch 'libbpf-type-suffixes-and-autocreate-flag-for-struct_ops-maps'

Eduard Zingerman says:

====================
libbpf: type suffixes and autocreate flag for struct_ops maps

Tweak struct_ops related APIs to allow the following features:
- specify version suffixes for struct_ops map types;
- share the same BPF program between several map definitions with
  different local BTF types, assuming only maps with the same kernel
  BTF type would be selected for load;
- toggle the autocreate flag for struct_ops maps;
- automatically toggle autoload for struct_ops programs referenced
  from struct_ops maps, depending on the autocreate status of the
  corresponding map;
- use SEC("?.struct_ops") and SEC("?.struct_ops.link") to define
  struct_ops maps with autocreate == false after object open.

This would allow loading programs like below:

    SEC("struct_ops/foo") int BPF_PROG(foo) { ... }
    SEC("struct_ops/bar") int BPF_PROG(bar) { ... }

    struct bpf_testmod_ops___v1 {
        int (*foo)(void);
    };

    struct bpf_testmod_ops___v2 {
        int (*foo)(void);
        int (*bar)(void);
    };

    /* Assume kernel type name to be 'test_ops' */
    SEC(".struct_ops.link")
    struct test_ops___v1 map_v1 = {
        /* Program 'foo' shared by maps with
         * different local BTF type
         */
        .foo = (void *)foo
    };

    SEC(".struct_ops.link")
    struct test_ops___v2 map_v2 = {
        .foo = (void *)foo,
        .bar = (void *)bar
    };

Assuming the following tweaks are done before loading:

    /* to load v1 */
    bpf_map__set_autocreate(skel->maps.map_v1, true);
    bpf_map__set_autocreate(skel->maps.map_v2, false);

    /* to load v2 */
    bpf_map__set_autocreate(skel->maps.map_v1, false);
    bpf_map__set_autocreate(skel->maps.map_v2, true);

Patch #8 ties the autocreate and autoload flags for struct_ops maps
and programs.

Changelog:
- v3 [3] -> v4:
  - changes for multiple styling suggestions from Andrii;
  - patch #5: libbpf log capture now happens for LIBBPF_INFO and
    LIBBPF_WARN messages and does not depend on verbosity flags (Andrii);
  - patch #6: fixed runtime crash caused by conflict with the newly
    added test case struct_ops_multi_pages;
  - patch #7: fixed free of possibly uninitialized pointer (Daniel);
  - patch #8: simpler algorithm to detect which programs to autoload
    (Andrii);
  - patch #9: added assertions for the autoload flag after object load
    (Andrii);
  - patch #12: DATASEC name rewrite in libbpf is now done in place, no
    new strings are added to BTF (Andrii);
  - patch #14: allow any printable characters in DATASEC names when
    the kernel validates BTF (Andrii).
- v2 [2] -> v3:
  - moved patch #8 logic to be fully done on load (requested by Andrii
    in offlist discussion);
  - in patch #9 added a test case for shadow vars and
    autocreate/autoload interaction.
- v1 [1] -> v2:
  - fixed memory leak in patch #1 (Kui-Feng);
  - improved error messages in patch #2 (Martin, Andrii);
  - in the bad_struct_ops selftest from patch #6 added .test_2 map
    member setup (David);
  - added utility functions to capture libbpf log from selftests
    (David);
  - in selftests replaced usage of ...__open_and_load by separate
    calls to ...__open() and ...__load() (Andrii);
  - removed serial_... in selftest definitions (Andrii);
  - improved comments in the struct_ops_autocreate selftest from
    patch #7 (David);
  - removed autoload toggling logic incompatible with shadow variables
    from bpf_map__set_autocreate(); instead, the struct_ops programs
    autoload property is computed at the struct_ops maps load phase,
    see patch #8 (Kui-Feng, Martin, Andrii);
  - added support for SEC("?.struct_ops") and SEC("?.struct_ops.link")
    (Andrii).

[1] https://lore.kernel.org/bpf/20240227204556.17524-1-eddyz87@gmail.com/
[2] https://lore.kernel.org/bpf/20240302011920.15302-1-eddyz87@gmail.com/
[3] https://lore.kernel.org/bpf/20240304225156.24765-1-eddyz87@gmail.com/
====================

Link: https://lore.kernel.org/r/20240306104529.6453-1-eddyz87@gmail.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
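As an illustration of the intended workflow, a loader can key those
autocreate toggles off a runtime check; a minimal sketch, assuming a
skeleton generated from the object above (has_kernel_test_ops_v2() is
a hypothetical helper, not part of libbpf):

    struct test_ops *skel = test_ops__open();
    int err;

    if (!skel)
        return -errno;
    if (has_kernel_test_ops_v2()) {    /* hypothetical feature probe */
        bpf_map__set_autocreate(skel->maps.map_v1, false);
        bpf_map__set_autocreate(skel->maps.map_v2, true);
    } else {
        bpf_map__set_autocreate(skel->maps.map_v1, true);
        bpf_map__set_autocreate(skel->maps.map_v2, false);
    }
    err = test_ops__load(skel);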
commit 516fca5a75
kernel/bpf/btf.c
@@ -809,9 +809,23 @@ static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
 	return __btf_name_valid(btf, offset);
 }
 
+/* Allow any printable character in DATASEC names */
 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
 {
-	return __btf_name_valid(btf, offset);
+	/* offset must be valid */
+	const char *src = btf_str_by_offset(btf, offset);
+	const char *src_limit;
+
+	/* set a limit on identifier length */
+	src_limit = src + KSYM_NAME_LEN;
+	src++;
+	while (*src && src < src_limit) {
+		if (!isprint(*src))
+			return false;
+		src++;
+	}
+
+	return !*src;
 }
 
 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
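To make the relaxed validation concrete, a few example DATASEC names
and how the function above treats them (an illustrative list derived
from the code, not part of the patch):

    /* btf_name_valid_section() examples:
     *   "?.struct_ops"              accepted: every character is printable
     *   "?.foo bar:buz"             accepted: ' ' and ':' are printable
     *   ".data\x7f"                 rejected: isprint() fails on the DEL byte
     *   name >= KSYM_NAME_LEN chars rejected: src reaches src_limit first
     */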
tools/lib/bpf/features.c
@@ -147,6 +147,25 @@ static int probe_kern_btf_datasec(int token_fd)
 				     strs, sizeof(strs), token_fd));
 }
 
+static int probe_kern_btf_qmark_datasec(int token_fd)
+{
+	static const char strs[] = "\0x\0?.data";
+	/* static int a; */
+	__u32 types[] = {
+		/* int */
+		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+		/* VAR x */ /* [2] */
+		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+		BTF_VAR_STATIC,
+		/* DATASEC ?.data */ /* [3] */
+		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+		BTF_VAR_SECINFO_ENC(2, 0, 4),
+	};
+
+	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+					     strs, sizeof(strs), token_fd));
+}
+
 static int probe_kern_btf_float(int token_fd)
 {
 	static const char strs[] = "\0float";
@@ -534,6 +553,9 @@ static struct kern_feature_desc {
 	[FEAT_ARG_CTX_TAG] = {
 		"kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
 	},
+	[FEAT_BTF_QMARK_DATASEC] = {
+		"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
+	},
 };
 
 bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
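The same capability can be probed from application code using only
public libbpf BTF APIs; a minimal sketch mirroring the probe above
(probe_qmark_datasec() is a name made up for this example, not a
libbpf function):

    #include <errno.h>
    #include <bpf/btf.h>

    /* Build the same tiny BTF as probe_kern_btf_qmark_datasec() and try
     * to load it; returns 1 if the kernel accepts a '?'-prefixed DATASEC
     * name, 0 otherwise.
     */
    static int probe_qmark_datasec(void)
    {
        struct btf *btf;
        int int_id, var_id, sec_id, err;

        btf = btf__new_empty();
        if (!btf)
            return 0;

        int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);   /* [1] */
        var_id = btf__add_var(btf, "x", BTF_VAR_STATIC, int_id);/* [2] */
        sec_id = btf__add_datasec(btf, "?.data", 4);            /* [3] */
        err = btf__add_datasec_var_info(btf, var_id, 0, 4);
        if (int_id < 0 || var_id < 0 || sec_id < 0 || err < 0) {
            btf__free(btf);
            return 0;
        }

        err = btf__load_into_kernel(btf);
        btf__free(btf);
        return err == 0 ? 1 : 0;
    }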
tools/lib/bpf/libbpf.c
@@ -612,6 +612,7 @@ enum sec_type {
 	SEC_BSS,
 	SEC_DATA,
 	SEC_RODATA,
+	SEC_ST_OPS,
 };
 
 struct elf_sec_desc {
@@ -627,8 +628,6 @@ struct elf_state {
 	Elf *elf;
 	Elf64_Ehdr *ehdr;
 	Elf_Data *symbols;
-	Elf_Data *st_ops_data;
-	Elf_Data *st_ops_link_data;
 	size_t shstrndx; /* section index for section name strings */
 	size_t strtabidx;
 	struct elf_sec_desc *secs;
@@ -637,8 +636,7 @@ struct elf_state {
 	__u32 btf_maps_sec_btf_id;
 	int text_shndx;
 	int symbols_shndx;
-	int st_ops_shndx;
-	int st_ops_link_shndx;
+	bool has_st_ops;
 };
 
 struct usdt_manager;
@@ -948,7 +946,7 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
 			       const char *name, __u32 kind);
 
 static int
-find_struct_ops_kern_types(struct bpf_object *obj, const char *tname,
+find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
 			   struct module_btf **mod_btf,
 			   const struct btf_type **type, __u32 *type_id,
 			   const struct btf_type **vtype, __u32 *vtype_id,
@@ -958,8 +956,12 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
 	const struct btf_member *kern_data_member;
 	struct btf *btf;
 	__s32 kern_vtype_id, kern_type_id;
+	char tname[256];
 	__u32 i;
 
+	snprintf(tname, sizeof(tname), "%.*s",
+		 (int)bpf_core_essential_name_len(tname_raw), tname_raw);
+
 	kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
 					&btf, mod_btf);
 	if (kern_type_id < 0) {
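The "___" suffix handling reuses the CO-RE "flavor" convention:
roughly, the essential name is everything before the "___" separator,
so "bpf_testmod_ops___v2" matches kernel type "bpf_testmod_ops". A
sketch of the computation (illustrative re-implementation; the real
helper is libbpf's bpf_core_essential_name_len()):

    #include <string.h>

    /* "bpf_testmod_ops___v2" -> 15, the length of "bpf_testmod_ops";
     * names without "___" keep their full length.
     */
    static size_t essential_name_len(const char *name)
    {
        const char *sep = strstr(name, "___");

        return sep ? (size_t)(sep - name) : strlen(name);
    }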
@@ -1027,6 +1029,48 @@ static bool is_valid_st_ops_program(struct bpf_object *obj,
 	return false;
 }
 
+/* For each struct_ops program P, referenced from some struct_ops map M,
+ * enable P.autoload if there are Ms for which M.autocreate is true,
+ * disable P.autoload if for all Ms M.autocreate is false.
+ * Don't change P.autoload for programs that are not referenced from any maps.
+ */
+static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
+{
+	struct bpf_program *prog, *slot_prog;
+	struct bpf_map *map;
+	int i, j, k, vlen;
+
+	for (i = 0; i < obj->nr_programs; ++i) {
+		int should_load = false;
+		int use_cnt = 0;
+
+		prog = &obj->programs[i];
+		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
+			continue;
+
+		for (j = 0; j < obj->nr_maps; ++j) {
+			map = &obj->maps[j];
+			if (!bpf_map__is_struct_ops(map))
+				continue;
+
+			vlen = btf_vlen(map->st_ops->type);
+			for (k = 0; k < vlen; ++k) {
+				slot_prog = map->st_ops->progs[k];
+				if (prog != slot_prog)
+					continue;
+
+				use_cnt++;
+				if (map->autocreate)
+					should_load = true;
+			}
+		}
+		if (use_cnt)
+			prog->autoload = should_load;
+	}
+
+	return 0;
+}
+
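From the API user's perspective this composes with
bpf_map__set_autocreate(); a sketch, assuming an object where maps
map_a and map_b (hypothetical names) both reference program test_1
and only map_a references test_2:

    bpf_map__set_autocreate(skel->maps.map_a, false);
    /* ... after the skeleton's __load() step:
     *   bpf_program__autoload(skel->progs.test_1) == true:  map_b still uses it
     *   bpf_program__autoload(skel->progs.test_2) == false: no enabled map uses it
     */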
 /* Init the map's fields that depend on kern_btf */
 static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
 {
@@ -1142,8 +1186,32 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
 
 		if (mod_btf)
 			prog->attach_btf_obj_fd = mod_btf->fd;
-		prog->attach_btf_id = kern_type_id;
-		prog->expected_attach_type = kern_member_idx;
+
+		/* if we haven't yet processed this BPF program, record proper
+		 * attach_btf_id and member_idx
+		 */
+		if (!prog->attach_btf_id) {
+			prog->attach_btf_id = kern_type_id;
+			prog->expected_attach_type = kern_member_idx;
+		}
+
+		/* struct_ops BPF prog can be re-used between multiple
+		 * .struct_ops & .struct_ops.link as long as it's the
+		 * same struct_ops struct definition and the same
+		 * function pointer field
+		 */
+		if (prog->attach_btf_id != kern_type_id) {
+			pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
+				map->name, mname, prog->name, prog->sec_name, prog->type,
+				prog->attach_btf_id, kern_type_id);
+			return -EINVAL;
+		}
+		if (prog->expected_attach_type != kern_member_idx) {
+			pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
+				map->name, mname, prog->name, prog->sec_name, prog->type,
+				prog->expected_attach_type, kern_member_idx);
+			return -EINVAL;
+		}
 
 		st_ops->kern_func_off[i] = kern_data_off + kern_moff;
 
@@ -1184,6 +1252,9 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
 		if (!bpf_map__is_struct_ops(map))
 			continue;
 
+		if (!map->autocreate)
+			continue;
+
 		err = bpf_map__init_kern_struct_ops(map);
 		if (err)
 			return err;
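One consequence worth noting: maps with autocreate == false now skip
kernel-type matching entirely, which is what allows a map whose local
type has no kernel counterpart to stay in the object, as exercised by
struct_ops_autocreate.c further down. A sketch of the assumed flow
(names taken from that selftest):

    skel = struct_ops_autocreate__open();
    /* testmod_2 has type bpf_testmod_ops___v2 with a member the kernel
     * lacks; disabling it means bpf_map__init_kern_struct_ops() never
     * runs for it, so load succeeds.
     */
    bpf_map__set_autocreate(skel->maps.testmod_2, false);
    err = struct_ops_autocreate__load(skel);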
@@ -1193,7 +1264,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
 }
 
 static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
-				int shndx, Elf_Data *data, __u32 map_flags)
+				int shndx, Elf_Data *data)
 {
 	const struct btf_type *type, *datasec;
 	const struct btf_var_secinfo *vsi;
@@ -1251,11 +1322,20 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
 			return -ENOMEM;
 		map->btf_value_type_id = type_id;
 
+		/* Follow same convention as for programs autoload:
+		 * SEC("?.struct_ops") means map is not created by default.
+		 */
+		if (sec_name[0] == '?') {
+			map->autocreate = false;
+			/* from now on forget there was ? in section name */
+			sec_name++;
+		}
+
 		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
 		map->def.key_size = sizeof(int);
 		map->def.value_size = type->size;
 		map->def.max_entries = 1;
-		map->def.map_flags = map_flags;
+		map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
 
 		map->st_ops = calloc(1, sizeof(*map->st_ops));
 		if (!map->st_ops)
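Summarizing the section name handling after this hunk (derived from
the '?' strip and the map_flags expression above):

    /* ELF section         -> autocreate, map_flags
     *  ".struct_ops"          true,       0
     *  ".struct_ops.link"     true,       BPF_F_LINK
     *  "?.struct_ops"         false,      0
     *  "?.struct_ops.link"    false,      BPF_F_LINK
     */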
@@ -1290,15 +1370,25 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
 
 static int bpf_object_init_struct_ops(struct bpf_object *obj)
 {
-	int err;
+	const char *sec_name;
+	int sec_idx, err;
 
-	err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
-				   obj->efile.st_ops_data, 0);
-	err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
-					  obj->efile.st_ops_link_shndx,
-					  obj->efile.st_ops_link_data,
-					  BPF_F_LINK);
-	return err;
+	for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
+		struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];
+
+		if (desc->sec_type != SEC_ST_OPS)
+			continue;
+
+		sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+		if (!sec_name)
+			return -LIBBPF_ERRNO__FORMAT;
+
+		err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 static struct bpf_object *bpf_object__new(const char *path,
@@ -1336,8 +1426,6 @@ static struct bpf_object *bpf_object__new(const char *path,
 	obj->efile.obj_buf = obj_buf;
 	obj->efile.obj_buf_sz = obj_buf_sz;
 	obj->efile.btf_maps_shndx = -1;
-	obj->efile.st_ops_shndx = -1;
-	obj->efile.st_ops_link_shndx = -1;
 	obj->kconfig_map_idx = -1;
 
 	obj->kern_version = get_kernel_version();
@@ -1354,8 +1442,6 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
 	elf_end(obj->efile.elf);
 	obj->efile.elf = NULL;
 	obj->efile.symbols = NULL;
-	obj->efile.st_ops_data = NULL;
-	obj->efile.st_ops_link_data = NULL;
 
 	zfree(&obj->efile.secs);
 	obj->efile.sec_cnt = 0;
@@ -2783,6 +2869,11 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
 	return sh->sh_flags & SHF_EXECINSTR;
 }
 
+static bool starts_with_qmark(const char *s)
+{
+	return s && s[0] == '?';
+}
+
 static bool btf_needs_sanitization(struct bpf_object *obj)
 {
 	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
@@ -2792,9 +2883,10 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
 	bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
 	bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
 	bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+	bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
 
 	return !has_func || !has_datasec || !has_func_global || !has_float ||
-	       !has_decl_tag || !has_type_tag || !has_enum64;
+	       !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
 }
 
 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
@@ -2806,6 +2898,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 	bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
 	bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
 	bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+	bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
 	int enum64_placeholder_id = 0;
 	struct btf_type *t;
 	int i, j, vlen;
@@ -2832,7 +2925,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 
 			name = (char *)btf__name_by_offset(btf, t->name_off);
 			while (*name) {
-				if (*name == '.')
+				if (*name == '.' || *name == '?')
 					*name = '_';
 				name++;
 			}
@@ -2847,6 +2940,14 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 				vt = (void *)btf__type_by_id(btf, v->type);
 				m->name_off = vt->name_off;
 			}
+		} else if (!has_qmark_datasec && btf_is_datasec(t) &&
+			   starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
+			/* replace '?' prefix with '_' for DATASEC names */
+			char *name;
+
+			name = (char *)btf__name_by_offset(btf, t->name_off);
+			if (name[0] == '?')
+				name[0] = '_';
 		} else if (!has_func && btf_is_func_proto(t)) {
 			/* replace FUNC_PROTO with ENUM */
 			vlen = btf_vlen(t);
@@ -2900,14 +3001,13 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 static bool libbpf_needs_btf(const struct bpf_object *obj)
 {
 	return obj->efile.btf_maps_shndx >= 0 ||
-	       obj->efile.st_ops_shndx >= 0 ||
-	       obj->efile.st_ops_link_shndx >= 0 ||
+	       obj->efile.has_st_ops ||
 	       obj->nr_extern > 0;
 }
 
 static bool kernel_needs_btf(const struct bpf_object *obj)
 {
-	return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0;
+	return obj->efile.has_st_ops;
 }
 
 static int bpf_object__init_btf(struct bpf_object *obj,
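The net effect of sanitization on older kernels, for illustration
(names are rewritten in place, no new BTF strings are added):

    /* kernel without FEAT_BTF_QMARK_DATASEC (but with DATASEC support):
     *   DATASEC "?.struct_ops"      -> "_.struct_ops"
     *   DATASEC "?.struct_ops.link" -> "_.struct_ops.link"
     * kernel without DATASEC support at all (DATASEC converted to
     * STRUCT, '.' and '?' both mapped to '_'):
     *   "?.struct_ops" -> "__struct_ops"
     */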
@@ -3608,12 +3708,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 				sec_desc->sec_type = SEC_RODATA;
 				sec_desc->shdr = sh;
 				sec_desc->data = data;
-			} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
-				obj->efile.st_ops_data = data;
-				obj->efile.st_ops_shndx = idx;
-			} else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) {
-				obj->efile.st_ops_link_data = data;
-				obj->efile.st_ops_link_shndx = idx;
+			} else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
+				   strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
+				   strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
+				   strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
+				sec_desc->sec_type = SEC_ST_OPS;
+				sec_desc->shdr = sh;
+				sec_desc->data = data;
+				obj->efile.has_st_ops = true;
 			} else {
 				pr_info("elf: skipping unrecognized data section(%d) %s\n",
 					idx, name);
@@ -3629,6 +3731,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 			if (!section_have_execinstr(obj, targ_sec_idx) &&
 			    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
 			    strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
+			    strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
+			    strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
 			    strcmp(name, ".rel" MAPS_ELF_SEC)) {
 				pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
 					idx, name, targ_sec_idx,
@@ -6926,12 +7030,12 @@ static int bpf_object__collect_relos(struct bpf_object *obj)
 		data = sec_desc->data;
 		idx = shdr->sh_info;
 
-		if (shdr->sh_type != SHT_REL) {
+		if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
 			pr_warn("internal error at %d\n", __LINE__);
 			return -LIBBPF_ERRNO__INTERNAL;
 		}
 
-		if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx)
+		if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
 			err = bpf_object__collect_st_ops_relos(obj, shdr, data);
 		else if (idx == obj->efile.btf_maps_shndx)
 			err = bpf_object__collect_map_relos(obj, shdr, data);
@@ -8105,11 +8209,20 @@ static void bpf_map_prepare_vdata(const struct bpf_map *map)
 
 static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
 {
+	struct bpf_map *map;
 	int i;
 
-	for (i = 0; i < obj->nr_maps; i++)
-		if (bpf_map__is_struct_ops(&obj->maps[i]))
-			bpf_map_prepare_vdata(&obj->maps[i]);
+	for (i = 0; i < obj->nr_maps; i++) {
+		map = &obj->maps[i];
+
+		if (!bpf_map__is_struct_ops(map))
+			continue;
+
+		if (!map->autocreate)
+			continue;
+
+		bpf_map_prepare_vdata(map);
+	}
 
 	return 0;
 }
@@ -8135,6 +8248,7 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
 	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
 	err = err ? : bpf_object__sanitize_maps(obj);
 	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
+	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
 	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
 	err = err ? : bpf_object__sanitize_and_load_btf(obj);
 	err = err ? : bpf_object__create_maps(obj);
@@ -9424,27 +9538,6 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 			return -EINVAL;
 		}
 
-		/* if we haven't yet processed this BPF program, record proper
-		 * attach_btf_id and member_idx
-		 */
-		if (!prog->attach_btf_id) {
-			prog->attach_btf_id = st_ops->type_id;
-			prog->expected_attach_type = member_idx;
-		}
-
-		/* struct_ops BPF prog can be re-used between multiple
-		 * .struct_ops & .struct_ops.link as long as it's the
-		 * same struct_ops struct definition and the same
-		 * function pointer field
-		 */
-		if (prog->attach_btf_id != st_ops->type_id ||
-		    prog->expected_attach_type != member_idx) {
-			pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
-				map->name, prog->name, prog->sec_name, prog->type,
-				prog->attach_btf_id, prog->expected_attach_type, name);
-			return -EINVAL;
-		}
-
 		st_ops->progs[member_idx] = prog;
 
 		/* st_ops->data will be exposed to users, being returned by
tools/lib/bpf/libbpf_internal.h
@@ -374,6 +374,8 @@ enum kern_feature_id {
 	FEAT_UPROBE_MULTI_LINK,
 	/* Kernel supports arg:ctx tag (__arg_ctx) for global subprogs natively */
 	FEAT_ARG_CTX_TAG,
+	/* Kernel supports '?' at the front of datasec names */
+	FEAT_BTF_QMARK_DATASEC,
 	__FEAT_CNT,
 };
 
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -564,6 +564,8 @@ static int bpf_dummy_reg(void *kdata)
 {
 	struct bpf_testmod_ops *ops = kdata;
 
+	if (ops->test_1)
+		ops->test_1();
 	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
 	 * initialized, so we need to check for NULL.
 	 */
@@ -609,6 +611,29 @@ struct bpf_struct_ops bpf_bpf_testmod_ops = {
 	.owner = THIS_MODULE,
 };
 
+static int bpf_dummy_reg2(void *kdata)
+{
+	struct bpf_testmod_ops2 *ops = kdata;
+
+	ops->test_1();
+	return 0;
+}
+
+static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
+	.test_1 = bpf_testmod_test_1,
+};
+
+struct bpf_struct_ops bpf_testmod_ops2 = {
+	.verifier_ops = &bpf_testmod_verifier_ops,
+	.init = bpf_testmod_ops_init,
+	.init_member = bpf_testmod_ops_init_member,
+	.reg = bpf_dummy_reg2,
+	.unreg = bpf_dummy_unreg,
+	.cfi_stubs = &__bpf_testmod_ops2,
+	.name = "bpf_testmod_ops2",
+	.owner = THIS_MODULE,
+};
+
 extern int bpf_fentry_test1(int a);
 
 static int bpf_testmod_init(void)
@@ -620,6 +645,7 @@ static int bpf_testmod_init(void)
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
 	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
+	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
 	if (ret < 0)
 		return ret;
 	if (bpf_fentry_test1(0) < 0)
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
@@ -89,4 +89,8 @@ struct bpf_testmod_ops {
 	int (*tramp_40)(int value);
 };
 
+struct bpf_testmod_ops2 {
+	int (*test_1)(void);
+};
+
 #endif /* _BPF_TESTMOD_H */
tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c (new file, 67 lines)
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "bad_struct_ops.skel.h"
+#include "bad_struct_ops2.skel.h"
+
+static void invalid_prog_reuse(void)
+{
+	struct bad_struct_ops *skel;
+	char *log = NULL;
+	int err;
+
+	skel = bad_struct_ops__open();
+	if (!ASSERT_OK_PTR(skel, "bad_struct_ops__open"))
+		return;
+
+	if (start_libbpf_log_capture())
+		goto cleanup;
+
+	err = bad_struct_ops__load(skel);
+	log = stop_libbpf_log_capture();
+	ASSERT_ERR(err, "bad_struct_ops__load should fail");
+	ASSERT_HAS_SUBSTR(log,
+			  "struct_ops init_kern testmod_2 func ptr test_1: invalid reuse of prog test_1",
+			  "expected init_kern message");
+
+cleanup:
+	free(log);
+	bad_struct_ops__destroy(skel);
+}
+
+static void unused_program(void)
+{
+	struct bad_struct_ops2 *skel;
+	char *log = NULL;
+	int err;
+
+	skel = bad_struct_ops2__open();
+	if (!ASSERT_OK_PTR(skel, "bad_struct_ops2__open"))
+		return;
+
+	/* struct_ops programs not referenced from any maps are open
+	 * with autoload set to true.
+	 */
+	ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo autoload == true");
+
+	if (start_libbpf_log_capture())
+		goto cleanup;
+
+	err = bad_struct_ops2__load(skel);
+	ASSERT_ERR(err, "bad_struct_ops2__load should fail");
+	log = stop_libbpf_log_capture();
+	ASSERT_HAS_SUBSTR(log, "prog 'foo': failed to load",
+			  "message about 'foo' failing to load");
+
+cleanup:
+	free(log);
+	bad_struct_ops2__destroy(skel);
+}
+
+void test_bad_struct_ops(void)
+{
+	if (test__start_subtest("invalid_prog_reuse"))
+		invalid_prog_reuse();
+	if (test__start_subtest("unused_program"))
+		unused_program();
+}
tools/testing/selftests/bpf/prog_tests/btf.c
@@ -3535,6 +3535,32 @@ static struct btf_raw_test raw_tests[] = {
 	.value_type_id = 1,
 	.max_entries = 1,
 },
+{
+	.descr = "datasec: name '?.foo bar:buz' is ok",
+	.raw_types = {
+		/* int */
+		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+		/* VAR x */ /* [2] */
+		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+		BTF_VAR_STATIC,
+		/* DATASEC ?.data */ /* [3] */
+		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+		BTF_VAR_SECINFO_ENC(2, 0, 4),
+		BTF_END_RAW,
+	},
+	BTF_STR_SEC("\0x\0?.foo bar:buz"),
+},
+{
+	.descr = "type name '?foo' is not ok",
+	.raw_types = {
+		/* union ?foo; */
+		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
+		BTF_END_RAW,
+	},
+	BTF_STR_SEC("\0?foo"),
+	.err_str = "Invalid name",
+	.btf_load_err = true,
+},
 
 {
 	.descr = "float test #1, well-formed",
@@ -4363,6 +4389,9 @@ static void do_test_raw(unsigned int test_num)
 	if (err || btf_fd < 0)
 		goto done;
 
+	if (!test->map_type)
+		goto done;
+
 	opts.btf_fd = btf_fd;
 	opts.btf_key_type_id = test->key_type_id;
 	opts.btf_value_type_id = test->value_type_id;
tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c (new file, 159 lines)
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "struct_ops_autocreate.skel.h"
+#include "struct_ops_autocreate2.skel.h"
+
+static void cant_load_full_object(void)
+{
+	struct struct_ops_autocreate *skel;
+	char *log = NULL;
+	int err;
+
+	skel = struct_ops_autocreate__open();
+	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
+		return;
+
+	if (start_libbpf_log_capture())
+		goto cleanup;
+	/* The testmod_2 map BTF type (struct bpf_testmod_ops___v2) doesn't
+	 * match the BTF of the actual struct bpf_testmod_ops defined in the
+	 * kernel, so we should fail to load it if we don't disable autocreate
+	 * for that map.
+	 */
+	err = struct_ops_autocreate__load(skel);
+	log = stop_libbpf_log_capture();
+	if (!ASSERT_ERR(err, "struct_ops_autocreate__load"))
+		goto cleanup;
+
+	ASSERT_HAS_SUBSTR(log, "libbpf: struct_ops init_kern", "init_kern message");
+	ASSERT_EQ(err, -ENOTSUP, "errno should be ENOTSUP");
+
+cleanup:
+	free(log);
+	struct_ops_autocreate__destroy(skel);
+}
+
+static int check_test_1_link(struct struct_ops_autocreate *skel, struct bpf_map *map)
+{
+	struct bpf_link *link;
+	int err;
+
+	link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
+		return -1;
+
+	/* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */
+	err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");
+	bpf_link__destroy(link);
+	return err;
+}
+
+static void can_load_partial_object(void)
+{
+	struct struct_ops_autocreate *skel;
+	int err;
+
+	skel = struct_ops_autocreate__open();
+	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open_opts"))
+		return;
+
+	err = bpf_map__set_autocreate(skel->maps.testmod_2, false);
+	if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
+		goto cleanup;
+
+	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 default autoload");
+	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_2), "test_2 default autoload");
+
+	err = struct_ops_autocreate__load(skel);
+	if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
+		goto cleanup;
+
+	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 actual autoload");
+	ASSERT_FALSE(bpf_program__autoload(skel->progs.test_2), "test_2 actual autoload");
+
+	check_test_1_link(skel, skel->maps.testmod_1);
+
+cleanup:
+	struct_ops_autocreate__destroy(skel);
+}
+
+static void optional_maps(void)
+{
+	struct struct_ops_autocreate *skel;
+	int err;
+
+	skel = struct_ops_autocreate__open();
+	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
+		return;
+
+	ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_1), "testmod_1 autocreate");
+	ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_2), "testmod_2 autocreate");
+	ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map), "optional_map autocreate");
+	ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map2), "optional_map2 autocreate");
+
+	err = bpf_map__set_autocreate(skel->maps.testmod_1, false);
+	err |= bpf_map__set_autocreate(skel->maps.testmod_2, false);
+	err |= bpf_map__set_autocreate(skel->maps.optional_map2, true);
+	if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
+		goto cleanup;
+
+	err = struct_ops_autocreate__load(skel);
+	if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
+		goto cleanup;
+
+	check_test_1_link(skel, skel->maps.optional_map2);
+
+cleanup:
+	struct_ops_autocreate__destroy(skel);
+}
+
+/* Swap test_mod1->test_1 program from 'bar' to 'foo' using shadow vars.
+ * test_mod1 load should enable autoload for 'foo'.
+ */
+static void autoload_and_shadow_vars(void)
+{
+	struct struct_ops_autocreate2 *skel = NULL;
+	struct bpf_link *link = NULL;
+	int err;
+
+	skel = struct_ops_autocreate2__open();
+	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open_opts"))
+		return;
+
+	ASSERT_FALSE(bpf_program__autoload(skel->progs.foo), "foo default autoload");
+	ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar default autoload");
+
+	/* loading map testmod_1 would switch foo's autoload to true */
+	skel->struct_ops.testmod_1->test_1 = skel->progs.foo;
+
+	err = struct_ops_autocreate2__load(skel);
+	if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
+		goto cleanup;
+
+	ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo actual autoload");
+	ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar actual autoload");
+
+	link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
+		goto cleanup;
+
+	/* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */
+	err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");
+
+cleanup:
+	bpf_link__destroy(link);
+	struct_ops_autocreate2__destroy(skel);
+}
+
+void test_struct_ops_autocreate(void)
+{
+	if (test__start_subtest("cant_load_full_object"))
+		cant_load_full_object();
+	if (test__start_subtest("can_load_partial_object"))
+		can_load_partial_object();
+	if (test__start_subtest("autoload_and_shadow_vars"))
+		autoload_and_shadow_vars();
+	if (test__start_subtest("optional_maps"))
+		optional_maps();
+}
tools/testing/selftests/bpf/prog_tests/struct_ops_module.c
@@ -30,11 +30,29 @@ cleanup:
 	close(fd);
 }
 
+static int attach_ops_and_check(struct struct_ops_module *skel,
+				struct bpf_map *map,
+				int expected_test_2_result)
+{
+	struct bpf_link *link;
+
+	link = bpf_map__attach_struct_ops(map);
+	ASSERT_OK_PTR(link, "attach_test_mod_1");
+	if (!link)
+		return -1;
+
+	/* test_{1,2}() would be called from bpf_dummy_reg() in bpf_testmod.c */
+	ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result");
+	ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result");
+
+	bpf_link__destroy(link);
+	return 0;
+}
+
 static void test_struct_ops_load(void)
 {
 	struct struct_ops_module *skel;
 	struct bpf_map_info info = {};
-	struct bpf_link *link;
 	int err;
 	u32 len;
 
@@ -59,20 +77,17 @@ static void test_struct_ops_load(void)
 	if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
 		goto cleanup;
 
-	link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
-	ASSERT_OK_PTR(link, "attach_test_mod_1");
-
 	check_map_info(&info);
 	/* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c
 	 *
 	 * In bpf_testmod.c it will pass 4 and 13 (the value of data) to
 	 * .test_2. So, the value of test_2_result should be 20 (4 + 13 +
 	 * 3).
 	 */
-	ASSERT_EQ(skel->bss->test_2_result, 20, "check_shadow_variables");
-
-	bpf_link__destroy(link);
-
-	check_map_info(&info);
+	if (attach_ops_and_check(skel, skel->maps.testmod_1, 20))
+		goto cleanup;
+	if (attach_ops_and_check(skel, skel->maps.testmod_2, 12))
+		goto cleanup;
 
 cleanup:
 	struct_ops_module__destroy(skel);
tools/testing/selftests/bpf/progs/bad_struct_ops.c (new file, 25 lines)
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1) { return 0; }
+
+SEC("struct_ops/test_2")
+int BPF_PROG(test_2) { return 0; }
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+	.test_1 = (void *)test_1,
+	.test_2 = (void *)test_2
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops2 testmod_2 = {
+	.test_1 = (void *)test_1
+};
tools/testing/selftests/bpf/progs/bad_struct_ops2.c (new file, 14 lines)
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* This is an unused struct_ops program, it lacks corresponding
+ * struct_ops map, which provides attachment information.
+ * W/o additional configuration attempt to load such
+ * BPF object file would fail.
+ */
+SEC("struct_ops/foo")
+void foo(void) {}
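For contrast, the '?' section prefix introduced by this series is the
supported way to keep such a program in an object without failing
load; a minimal variant of the same file (cf. struct_ops_autocreate2.c
below, which uses the same idiom):

    SEC("?struct_ops/foo")	/* autoload == false after open */
    void foo(void) {}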
tools/testing/selftests/bpf/progs/struct_ops_autocreate.c (new file, 52 lines)
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+	test_1_result = 42;
+	return 0;
+}
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_2)
+{
+	return 0;
+}
+
+struct bpf_testmod_ops___v1 {
+	int (*test_1)(void);
+};
+
+struct bpf_testmod_ops___v2 {
+	int (*test_1)(void);
+	int (*does_not_exist)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v1 testmod_1 = {
+	.test_1 = (void *)test_1
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v2 testmod_2 = {
+	.test_1 = (void *)test_1,
+	.does_not_exist = (void *)test_2
+};
+
+SEC("?.struct_ops")
+struct bpf_testmod_ops___v1 optional_map = {
+	.test_1 = (void *)test_1,
+};
+
+SEC("?.struct_ops.link")
+struct bpf_testmod_ops___v1 optional_map2 = {
+	.test_1 = (void *)test_1,
+};
tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c (new file, 32 lines)
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+
+SEC("?struct_ops/test_1")
+int BPF_PROG(foo)
+{
+	test_1_result = 42;
+	return 0;
+}
+
+SEC("?struct_ops/test_1")
+int BPF_PROG(bar)
+{
+	test_1_result = 24;
+	return 0;
+}
+
+struct bpf_testmod_ops {
+	int (*test_1)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+	.test_1 = (void *)bar
+};
tools/testing/selftests/bpf/progs/struct_ops_module.c
@@ -7,12 +7,14 @@
 
 char _license[] SEC("license") = "GPL";
 
 int test_1_result = 0;
+int test_2_result = 0;
 
 SEC("struct_ops/test_1")
 int BPF_PROG(test_1)
 {
-	return 0xdeadbeef;
+	test_1_result = 0xdeadbeef;
+	return 0;
 }
 
 SEC("struct_ops/test_2")
@@ -35,3 +37,20 @@ struct bpf_testmod_ops testmod_1 = {
 	.data = 0x1,
 };
 
+SEC("struct_ops/test_2")
+void BPF_PROG(test_2_v2, int a, int b)
+{
+	test_2_result = a * b;
+}
+
+struct bpf_testmod_ops___v2 {
+	int (*test_1)(void);
+	void (*test_2)(int a, int b);
+	int (*test_maybe_null)(int dummy, struct task_struct *task);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v2 testmod_2 = {
+	.test_1 = (void *)test_1,
+	.test_2 = (void *)test_2_v2,
+};
tools/testing/selftests/bpf/test_progs.c
@@ -683,11 +683,69 @@ static const struct argp_option opts[] = {
 	{},
 };
 
+static FILE *libbpf_capture_stream;
+
+static struct {
+	char *buf;
+	size_t buf_sz;
+} libbpf_output_capture;
+
+/* Creates a global memstream capturing INFO and WARN level output
+ * passed to libbpf_print_fn.
+ * Returns 0 on success, negative value on failure.
+ * On failure the description is printed using PRINT_FAIL and
+ * current test case is marked as fail.
+ */
+int start_libbpf_log_capture(void)
+{
+	if (libbpf_capture_stream) {
+		PRINT_FAIL("%s: libbpf_capture_stream != NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	libbpf_capture_stream = open_memstream(&libbpf_output_capture.buf,
+					       &libbpf_output_capture.buf_sz);
+	if (!libbpf_capture_stream) {
+		PRINT_FAIL("%s: open_memstream failed errno=%d\n", __func__, errno);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Destroys global memstream created by start_libbpf_log_capture().
+ * Returns a pointer to captured data which has to be freed.
+ * Returned buffer is null terminated.
+ */
+char *stop_libbpf_log_capture(void)
+{
+	char *buf;
+
+	if (!libbpf_capture_stream)
+		return NULL;
+
+	fputc(0, libbpf_capture_stream);
+	fclose(libbpf_capture_stream);
+	libbpf_capture_stream = NULL;
+	/* get 'buf' after fclose(), see open_memstream() documentation */
+	buf = libbpf_output_capture.buf;
+	memset(&libbpf_output_capture, 0, sizeof(libbpf_output_capture));
+	return buf;
+}
+
 static int libbpf_print_fn(enum libbpf_print_level level,
 			   const char *format, va_list args)
 {
+	if (libbpf_capture_stream && level != LIBBPF_DEBUG) {
+		va_list args2;
+
+		va_copy(args2, args);
+		vfprintf(libbpf_capture_stream, format, args2);
+	}
+
 	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
 		return 0;
 
 	vfprintf(stdout, format, args);
 	return 0;
 }
@@ -1081,6 +1139,7 @@ static void run_one_test(int test_num)
 	cleanup_cgroup_environment();
 
 	stdio_restore();
+	free(stop_libbpf_log_capture());
 
 	dump_test_log(test, state, false, false, NULL);
 }
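Typical usage of the capture helpers from a test, mirroring
bad_struct_ops.c above (my_test__load and the expected substring are
placeholders for this sketch):

    char *log = NULL;
    int err;

    if (start_libbpf_log_capture())
        return;
    err = my_test__load(skel);          /* placeholder skeleton call */
    log = stop_libbpf_log_capture();
    ASSERT_ERR(err, "load should fail");
    ASSERT_HAS_SUBSTR(log, "expected warning text", "log check");
    free(log);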
tools/testing/selftests/bpf/test_progs.h
@@ -397,6 +397,9 @@ int test__join_cgroup(const char *path);
 		system(cmd); \
 	})
 
+int start_libbpf_log_capture(void);
+char *stop_libbpf_log_capture(void);
+
 static inline __u64 ptr_to_u64(const void *ptr)
 {
 	return (__u64) (unsigned long) ptr;