bpf: handle ldimm64 properly in check_cfg()

ldimm64 instructions are 16 bytes long and occupy two instruction slots,
so they have to be handled appropriately in check_cfg(), just as the rest
of the BPF verifier already does.

This has implications in three places:
  - when determining the next instruction for non-jump instructions;
  - when determining the next instruction for callback address ldimm64
    instructions (in visit_func_call_insn());
  - when checking for unreachable instructions, where the second half of
    an ldimm64 is expected to be unreachable.

We also take this as an opportunity to report a jump into the middle of
an ldimm64 instruction, and adjust a few test_verifier tests accordingly.
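
As a concrete illustration, a program of the following shape is what
check_cfg() now rejects up front (a minimal sketch for illustration only,
not lifted from the updated selftests):

  BPF_JMP_IMM(BPF_JA, 0, 0, 1),   /* insn 0: goto +1, i.e. jump to insn 2 */
  BPF_LD_IMM64(BPF_REG_0, 1),     /* insns 1-2: dual-slot ldimm64; insn 2 */
                                  /* is its second half                   */
  BPF_EXIT_INSN(),                /* insn 3 */

Here the verifier now reports "jump into the middle of ldimm64 insn 1"
instead of stumbling over the second half later as an invalid BPF_LD_IMM
insn.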

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Reported-by: Hao Sun <sunhao.th@gmail.com>
Fixes: 475fb78fbf ("bpf: verifier (add branch/goto checks)")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231110002638.4168352-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

@@ -909,10 +909,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
         aux->ctx_field_size = size;
 }
 
+static bool bpf_is_ldimm64(const struct bpf_insn *insn)
+{
+        return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
 static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
 {
-        return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
-               insn->src_reg == BPF_PSEUDO_FUNC;
+        return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
 }
 
 struct bpf_prog_ops {
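
The pattern the verifier changes below build on is simply to ask
bpf_is_ldimm64() for an instruction's size in slots before stepping to the
next instruction. A sketch of that idiom (the function and loop here are
illustrative only, not part of the patch):

  /* Illustrative only: visit each instruction start, stepping over the
   * second slot of every dual-slot ldimm64.
   */
  static void walk_insn_starts(const struct bpf_insn *insns, int insn_cnt)
  {
          int i, insn_sz;

          for (i = 0; i < insn_cnt; i += insn_sz) {
                  insn_sz = bpf_is_ldimm64(&insns[i]) ? 2 : 1;
                  /* insns[i] is the first (or only) slot of an instruction;
                   * the next one starts at i + insn_sz
                   */
          }
  }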

@@ -15439,15 +15439,16 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
                                 struct bpf_verifier_env *env,
                                 bool visit_callee)
 {
-        int ret;
+        int ret, insn_sz;
 
-        ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
+        insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
+        ret = push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
         if (ret)
                 return ret;
 
-        mark_prune_point(env, t + 1);
+        mark_prune_point(env, t + insn_sz);
         /* when we exit from subprog, we need to record non-linear history */
-        mark_jmp_point(env, t + 1);
+        mark_jmp_point(env, t + insn_sz);
 
         if (visit_callee) {
                 mark_prune_point(env, t);
@@ -15469,15 +15470,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
         struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
-        int ret, off;
+        int ret, off, insn_sz;
 
         if (bpf_pseudo_func(insn))
                 return visit_func_call_insn(t, insns, env, true);
 
         /* All non-branch instructions have a single fall-through edge. */
         if (BPF_CLASS(insn->code) != BPF_JMP &&
-            BPF_CLASS(insn->code) != BPF_JMP32)
-                return push_insn(t, t + 1, FALLTHROUGH, env, false);
+            BPF_CLASS(insn->code) != BPF_JMP32) {
+                insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
+                return push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
+        }
 
         switch (BPF_OP(insn->code)) {
         case BPF_EXIT:
@@ -15607,11 +15610,21 @@ walk_cfg:
         }
 
         for (i = 0; i < insn_cnt; i++) {
+                struct bpf_insn *insn = &env->prog->insnsi[i];
+
                 if (insn_state[i] != EXPLORED) {
                         verbose(env, "unreachable insn %d\n", i);
                         ret = -EINVAL;
                         goto err_free;
                 }
+                if (bpf_is_ldimm64(insn)) {
+                        if (insn_state[i + 1] != 0) {
+                                verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
+                                ret = -EINVAL;
+                                goto err_free;
+                        }
+                        i++; /* skip second half of ldimm64 */
+                }
         }
         ret = 0; /* cfg looks good */

@@ -9,8 +9,8 @@
         BPF_MOV64_IMM(BPF_REG_0, 2),
         BPF_EXIT_INSN(),
         },
-        .errstr = "invalid BPF_LD_IMM insn",
-        .errstr_unpriv = "R1 pointer comparison",
+        .errstr = "jump into the middle of ldimm64 insn 1",
+        .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
         .result = REJECT,
 },
 {
@@ -23,8 +23,8 @@
         BPF_LD_IMM64(BPF_REG_0, 1),
         BPF_EXIT_INSN(),
         },
-        .errstr = "invalid BPF_LD_IMM insn",
-        .errstr_unpriv = "R1 pointer comparison",
+        .errstr = "jump into the middle of ldimm64 insn 1",
+        .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
         .result = REJECT,
 },
 {
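
For completeness, a test_verifier case of roughly this shape exercises the
new message; the .insns below are illustrative, since the hunks above only
show the tail of each existing case:

  {
          "sketch: jump into the middle of ldimm64",
          .insns = {
          BPF_JMP_IMM(BPF_JA, 0, 0, 1),   /* jump to insn 2 */
          BPF_LD_IMM64(BPF_REG_0, 1),     /* insns 1-2 */
          BPF_EXIT_INSN(),
          },
          .errstr = "jump into the middle of ldimm64 insn 1",
          .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
          .result = REJECT,
  },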