bpf: propagate poke descriptors to subprograms
Previously, there was no need for poke descriptors being present in subprogram's bpf_prog_aux struct since tailcalls were simply not allowed in them. Each subprog is JITed independently so in order to enable JITing subprograms that use tailcalls, do the following: - in fixup_bpf_calls() store the index of tailcall insn onto the generated poke descriptor, - in case when insn patching occurs, adjust the tailcall insn idx from bpf_patch_insn_data, - then in jit_subprogs() check whether the given poke descriptor belongs to the current subprog by checking if that previously stored absolute index of tail call insn is in the scope of the insns of given subprog, - update the insn->imm with new poke descriptor slot so that while JITing the proper poke descriptor will be grabbed. This way each of the main program's poke descriptors is distributed across the subprograms' poke descriptor arrays, so the main program's descriptors can be untracked out of the prog array map. Also add the subprog's aux struct to the BPF map poke_progs list by calling map_poke_track() on it. In case of any error, call map_poke_untrack() on the subprog's aux structs that have already been registered with the prog array map. Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
0d4ddce300
commit
a748c6975d
@ -707,6 +707,7 @@ struct bpf_jit_poke_descriptor {
|
||||
bool ip_stable;
|
||||
u8 adj_off;
|
||||
u16 reason;
|
||||
u32 insn_idx;
|
||||
};
|
||||
|
||||
/* reg_type info for ctx arguments */
|
||||
|
@ -9623,6 +9623,18 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
|
||||
}
|
||||
}
|
||||
|
||||
static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
|
||||
{
|
||||
struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
|
||||
int i, sz = prog->aux->size_poke_tab;
|
||||
struct bpf_jit_poke_descriptor *desc;
|
||||
|
||||
for (i = 0; i < sz; i++) {
|
||||
desc = &tab[i];
|
||||
desc->insn_idx += len - 1;
|
||||
}
|
||||
}
|
||||
|
||||
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
|
||||
const struct bpf_insn *patch, u32 len)
|
||||
{
|
||||
@ -9639,6 +9651,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
|
||||
if (adjust_insn_aux_data(env, new_prog, off, len))
|
||||
return NULL;
|
||||
adjust_subprog_starts(env, off, len);
|
||||
adjust_poke_descs(new_prog, len);
|
||||
return new_prog;
|
||||
}
|
||||
|
||||
@ -10169,6 +10182,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
|
||||
{
|
||||
struct bpf_prog *prog = env->prog, **func, *tmp;
|
||||
int i, j, subprog_start, subprog_end = 0, len, subprog;
|
||||
struct bpf_map *map_ptr;
|
||||
struct bpf_insn *insn;
|
||||
void *old_bpf_func;
|
||||
int err, num_exentries;
|
||||
@ -10236,6 +10250,31 @@ static int jit_subprogs(struct bpf_verifier_env *env)
|
||||
func[i]->aux->btf = prog->aux->btf;
|
||||
func[i]->aux->func_info = prog->aux->func_info;
|
||||
|
||||
for (j = 0; j < prog->aux->size_poke_tab; j++) {
|
||||
u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
|
||||
int ret;
|
||||
|
||||
if (!(insn_idx >= subprog_start &&
|
||||
insn_idx <= subprog_end))
|
||||
continue;
|
||||
|
||||
ret = bpf_jit_add_poke_descriptor(func[i],
|
||||
&prog->aux->poke_tab[j]);
|
||||
if (ret < 0) {
|
||||
verbose(env, "adding tail call poke descriptor failed\n");
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
|
||||
|
||||
map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
|
||||
ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
|
||||
if (ret < 0) {
|
||||
verbose(env, "tracking tail call prog failed\n");
|
||||
goto out_free;
|
||||
}
|
||||
}
|
||||
|
||||
/* Use bpf_prog_F_tag to indicate functions in stack traces.
|
||||
* Long term would need debug info to populate names
|
||||
*/
|
||||
@ -10261,6 +10300,19 @@ static int jit_subprogs(struct bpf_verifier_env *env)
|
||||
}
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
/* Untrack main program's aux structs so that during map_poke_run()
|
||||
* we will not stumble upon the unfilled poke descriptors; each
|
||||
* of the main program's poke descs got distributed across subprogs
|
||||
* and got tracked onto map, so we are sure that none of them will
|
||||
* be missed after the operation below
|
||||
*/
|
||||
for (i = 0; i < prog->aux->size_poke_tab; i++) {
|
||||
map_ptr = prog->aux->poke_tab[i].tail_call.map;
|
||||
|
||||
map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
|
||||
}
|
||||
|
||||
/* at this point all bpf functions were successfully JITed
|
||||
* now populate all bpf_calls with correct addresses and
|
||||
* run last pass of JIT
|
||||
@ -10329,9 +10381,16 @@ static int jit_subprogs(struct bpf_verifier_env *env)
|
||||
bpf_prog_free_unused_jited_linfo(prog);
|
||||
return 0;
|
||||
out_free:
|
||||
for (i = 0; i < env->subprog_cnt; i++)
|
||||
if (func[i])
|
||||
bpf_jit_free(func[i]);
|
||||
for (i = 0; i < env->subprog_cnt; i++) {
|
||||
if (!func[i])
|
||||
continue;
|
||||
|
||||
for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
|
||||
map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
|
||||
map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
|
||||
}
|
||||
bpf_jit_free(func[i]);
|
||||
}
|
||||
kfree(func);
|
||||
out_undo_insn:
|
||||
/* cleanup main prog to be interpreted */
|
||||
@ -10549,6 +10608,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
|
||||
.reason = BPF_POKE_REASON_TAIL_CALL,
|
||||
.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
|
||||
.tail_call.key = bpf_map_key_immediate(aux),
|
||||
.insn_idx = i + delta,
|
||||
};
|
||||
|
||||
ret = bpf_jit_add_poke_descriptor(prog, &desc);
|
||||
|
Loading…
Reference in New Issue
Block a user