18f4fccbf3
This change updates the BPF syscall loader to relocate BTF_KIND_FUNC relocations, with support for weak kfunc relocations. The general idea is to move map_fds into the loader map, and to also use that data for storing kfunc BTF fds. Since both reuse the fd_array parameter, they need to be kept together.

For map_fds, we reserve MAX_USED_MAPS slots in a region, and for kfuncs, we reserve MAX_KFUNC_DESCS. This is done so that insn->off has more chances of being <= INT16_MAX than if we treated the data map as a sparse array and added fds as needed.

When the MAX_KFUNC_DESCS limit is reached, we fall back to the sparse array model, so that as long as the index remains <= INT16_MAX, we can still pass it relative to the start of fd_array.

We store all ksyms in an array, which lets us avoid repeated calls to the bpf_btf_find_by_name_kind helper and reuse a BTF fd that was already stored. In later tests, this also speeds up the loading process compared to emitting such calls in all cases.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211002011757.311265-9-memxor@gmail.com
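To make the fd_array layout concrete, here is a minimal sketch of the indexing scheme described above. The constant values mirror MAX_USED_MAPS and MAX_KFUNC_DESCS; the struct and helper names are illustrative assumptions, not the actual gen_loader API.

#include <errno.h>
#include <stdint.h>

#define MAX_USED_MAPS	64	/* map fds occupy fd_array[0 .. 63] */
#define MAX_KFUNC_DESCS	256	/* kfunc BTF fds occupy the next 256 slots */

struct fd_array_layout {
	int nr_kfuncs;	/* dense kfunc slots handed out so far */
	int nr_sparse;	/* fds appended past the reserved regions */
};

/* Return the fd_array index to store in insn->off for a kfunc's BTF fd. */
static int kfunc_fd_array_index(struct fd_array_layout *fda)
{
	int idx;

	if (fda->nr_kfuncs < MAX_KFUNC_DESCS) {
		/* Dense region: kfunc BTF fds sit right after the map fds. */
		idx = MAX_USED_MAPS + fda->nr_kfuncs++;
	} else {
		/* Sparse fallback: append past the reserved regions; the
		 * index stays relative to the start of fd_array.
		 */
		idx = MAX_USED_MAPS + MAX_KFUNC_DESCS + fda->nr_sparse++;
	}
	/* insn->off is a signed 16-bit field, so the index must fit. */
	return idx > INT16_MAX ? -E2BIG : idx;
}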
56 lines · 1.5 KiB · C
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2021 Facebook */
#ifndef __BPF_GEN_INTERNAL_H
#define __BPF_GEN_INTERNAL_H

struct ksym_relo_desc {
	const char *name;
	int kind;
	int insn_idx;
	bool is_weak;
};

struct ksym_desc {
	const char *name;
	int ref;
	int kind;
	int off;
	int insn;
};

struct bpf_gen {
	struct gen_loader_opts *opts;
	void *data_start;
	void *data_cur;
	void *insn_start;
	void *insn_cur;
	ssize_t cleanup_label;
	__u32 nr_progs;
	__u32 nr_maps;
	int log_level;
	int error;
	struct ksym_relo_desc *relos;
	int relo_cnt;
	char attach_target[128];
	int attach_kind;
	struct ksym_desc *ksyms;
	__u32 nr_ksyms;
	int fd_array;
	int nr_fd_array;
};

void bpf_gen__init(struct bpf_gen *gen, int log_level);
int bpf_gen__finish(struct bpf_gen *gen);
void bpf_gen__free(struct bpf_gen *gen);
void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx);
struct bpf_prog_load_params;
void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, int kind,
			    int insn_idx);

#endif
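As a usage illustration, the following is a hedged sketch (assumed helper name, not the real gen_loader.c code) of the caching that the ksyms array enables: look up an existing ksym_desc by name so the loader can reuse a BTF fd already stored in fd_array instead of emitting another bpf_btf_find_by_name_kind() call.

#include <linux/types.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "bpf_gen_internal.h"	/* the header above */

static struct ksym_desc *get_or_add_ksym_desc(struct bpf_gen *gen,
					      struct ksym_relo_desc *relo)
{
	struct ksym_desc *kdesc;
	__u32 i;

	for (i = 0; i < gen->nr_ksyms; i++) {
		if (!strcmp(gen->ksyms[i].name, relo->name)) {
			gen->ksyms[i].ref++;	/* hit: reuse the stored BTF fd */
			return &gen->ksyms[i];
		}
	}
	/* Miss: grow the array and record a fresh descriptor. */
	kdesc = realloc(gen->ksyms, (gen->nr_ksyms + 1) * sizeof(*kdesc));
	if (!kdesc)
		return NULL;
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;		/* fd_array offset, set once the fd is stored */
	kdesc->insn = 0;	/* first insn loading this ksym, for reuse */
	return kdesc;
}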