bpf: free up BPF_JMP | BPF_CALL | BPF_X opcode
Free up the BPF_JMP | BPF_CALL | BPF_X opcode so it can be used for an actual indirect call by register, and use a kernel-internal opcode to mark the call instruction into the bpf_tail_call() helper. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
d2e0ef493a
commit
71189fa9b0
@ -586,7 +586,7 @@ emit_cond_jmp:
|
||||
break;
|
||||
}
|
||||
/* tail call */
|
||||
case BPF_JMP | BPF_CALL | BPF_X:
|
||||
case BPF_JMP | BPF_TAIL_CALL:
|
||||
if (emit_bpf_tail_call(ctx))
|
||||
return -EFAULT;
|
||||
break;
|
||||
|
@ -938,7 +938,7 @@ common_load:
|
||||
/*
|
||||
* Tail call
|
||||
*/
|
||||
case BPF_JMP | BPF_CALL | BPF_X:
|
||||
case BPF_JMP | BPF_TAIL_CALL:
|
||||
ctx->seen |= SEEN_TAILCALL;
|
||||
bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
|
||||
break;
|
||||
|
@ -991,7 +991,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BPF_JMP | BPF_CALL | BPF_X:
|
||||
case BPF_JMP | BPF_TAIL_CALL:
|
||||
/*
|
||||
* Implicit input:
|
||||
* B1: pointer to ctx
|
||||
|
@ -1217,7 +1217,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
|
||||
}
|
||||
|
||||
/* tail call */
|
||||
case BPF_JMP | BPF_CALL | BPF_X:
|
||||
case BPF_JMP | BPF_TAIL_CALL:
|
||||
emit_tail_call(ctx);
|
||||
break;
|
||||
|
||||
|
@ -877,7 +877,7 @@ xadd: if (is_imm8(insn->off))
|
||||
}
|
||||
break;
|
||||
|
||||
case BPF_JMP | BPF_CALL | BPF_X:
|
||||
case BPF_JMP | BPF_TAIL_CALL:
|
||||
emit_bpf_tail_call(&prog);
|
||||
break;
|
||||
|
||||
|
@ -57,6 +57,9 @@ struct bpf_prog_aux;
|
||||
#define BPF_REG_AX MAX_BPF_REG
|
||||
#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
|
||||
|
||||
/* unused opcode to mark special call to bpf_tail_call() helper */
|
||||
#define BPF_TAIL_CALL 0xf0
|
||||
|
||||
/* As per nm, we expose JITed images as text (code) section for
|
||||
* kallsyms. That way, tools like perf can find it to match
|
||||
* addresses.
|
||||
|
@ -824,7 +824,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
|
||||
[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
|
||||
/* Call instruction */
|
||||
[BPF_JMP | BPF_CALL] = &&JMP_CALL,
|
||||
[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
|
||||
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
|
||||
/* Jumps */
|
||||
[BPF_JMP | BPF_JA] = &&JMP_JA,
|
||||
[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
|
||||
|
@ -3469,7 +3469,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
|
||||
* that doesn't support bpf_tail_call yet
|
||||
*/
|
||||
insn->imm = 0;
|
||||
insn->code |= BPF_X;
|
||||
insn->code = BPF_JMP | BPF_TAIL_CALL;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user