Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2021-02-10

The following pull request contains BPF updates for your *net* tree.

We've added 5 non-merge commits during the last 8 day(s) which contain
a total of 3 files changed, 22 insertions(+), 21 deletions(-).

The main changes are:

1) Fix missed execution of kprobes BPF progs when the kprobe is firing
   via int3, from Alexei Starovoitov.

2) Fix a potential integer overflow in map max_entries for stackmap on
   32 bit archs, from Bui Quang Minh.

3) Fix a verifier pruning and an insn rewrite issue related to 32 bit
   ops, from Daniel Borkmann.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit b8776f14a4
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -115,6 +115,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 
 	/* hash table size must be power of 2 */
 	n_buckets = roundup_pow_of_two(attr->max_entries);
+	if (!n_buckets)
+		return ERR_PTR(-E2BIG);
 
 	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
 	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
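
For context on the stackmap hunk above: on a 32 bit architecture the kernel's
roundup_pow_of_two() evaluates 1UL << fls_long(n - 1) on a 32 bit unsigned
long, so any max_entries above 2^31 shifts past the top bit (formally
undefined; 0 in practice on the affected configs) and the map would be sized
from a bogus bucket count. The added check turns that 0 into -E2BIG. A minimal
userspace sketch of the failure mode; roundup_pow_of_two32() and the values
are illustrative, not kernel code:

#include <stdio.h>
#include <stdint.h>

/* Userspace model of roundup_pow_of_two() on an arch where unsigned
 * long is 32 bits: keep doubling until we cover n, returning 0 when
 * the shift overflows, mirroring what the kernel's
 * 1UL << fls_long(n - 1) produced in practice.
 */
static uint32_t roundup_pow_of_two32(uint32_t n)
{
	uint32_t r = 1;

	while (r && r < n)
		r <<= 1;	/* wraps to 0 past bit 31 */
	return r;
}

int main(void)
{
	/* max_entries just above 2^31: no 32 bit power of two covers it */
	uint32_t max_entries = 0x80000001u;
	uint32_t n_buckets = roundup_pow_of_two32(max_entries);

	/* prints 0 -> stack_map_alloc() now rejects this with -E2BIG */
	printf("n_buckets = %u\n", n_buckets);
	return 0;
}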
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6877,7 +6877,7 @@ static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
 	case BPF_JSGT:
 		if (reg->s32_min_value > sval)
 			return 1;
-		else if (reg->s32_max_value < sval)
+		else if (reg->s32_max_value <= sval)
 			return 0;
 		break;
 	case BPF_JLT:
@@ -6950,7 +6950,7 @@ static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 	case BPF_JSGT:
 		if (reg->smin_value > sval)
 			return 1;
-		else if (reg->smax_value < sval)
+		else if (reg->smax_value <= sval)
 			return 0;
 		break;
 	case BPF_JLT:
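
The two verifier hunks above are the same one-character fix in the 32 bit and
64 bit branch analysis: BPF_JSGT ("jump if signed greater than") can already
be settled as never-taken when the register's signed maximum is less than or
equal to the comparison value, not only when it is strictly less. A minimal
model of the decision; jsgt_taken() and its arguments are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Model of the BPF_JSGT arm of is_branch64_taken(): decide whether
 * "if reg s> val" is always (1), never (0) or maybe (-1) taken from
 * the register's tracked signed bounds. 'fixed' selects the predicate.
 */
static int jsgt_taken(int64_t smin, int64_t smax, int64_t sval, int fixed)
{
	if (smin > sval)
		return 1;				/* always taken */
	if (fixed ? smax <= sval : smax < sval)
		return 0;				/* never taken */
	return -1;					/* unknown */
}

int main(void)
{
	/* reg known to be in [0, 5] signed, insn "if reg s> 5 goto ...":
	 * reg can never exceed 5, so the branch is never taken.
	 */
	printf("old: %d\n", jsgt_taken(0, 5, 5, 0));	/* -1: missed  */
	printf("new: %d\n", jsgt_taken(0, 5, 5, 1));	/*  0: settled */
	return 0;
}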
@@ -8590,7 +8590,11 @@ static bool range_within(struct bpf_reg_state *old,
 	return old->umin_value <= cur->umin_value &&
 	       old->umax_value >= cur->umax_value &&
 	       old->smin_value <= cur->smin_value &&
-	       old->smax_value >= cur->smax_value;
+	       old->smax_value >= cur->smax_value &&
+	       old->u32_min_value <= cur->u32_min_value &&
+	       old->u32_max_value >= cur->u32_max_value &&
+	       old->s32_min_value <= cur->s32_min_value &&
+	       old->s32_max_value >= cur->s32_max_value;
 }
 
 /* Maximum number of register states that can exist at once */
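
The range_within() hunk closes the pruning gap: the verifier tracks u32/s32
bounds independently of the 64 bit ones (32 bit ALU and JMP32 operations
refine only the former), so an old, already-verified state can enclose the
current one on its 64 bit bounds while being narrower on the 32 bit
subranges. A rough model of the unsigned half; the real function compares
the signed bounds as well, and the example values are contrived:

#include <stdio.h>
#include <stdint.h>

/* Illustrative model of the pruning check: the already-verified old
 * state must enclose the current one, including the independently
 * tracked 32 bit subrange.
 */
struct bounds {
	uint64_t umin, umax;		/* 64 bit unsigned range */
	uint32_t u32_min, u32_max;	/* low 32 bit subrange */
};

static int within_64only(const struct bounds *old, const struct bounds *cur)
{
	return old->umin <= cur->umin && old->umax >= cur->umax;
}

static int within_fixed(const struct bounds *old, const struct bounds *cur)
{
	return within_64only(old, cur) &&
	       old->u32_min <= cur->u32_min &&
	       old->u32_max >= cur->u32_max;
}

int main(void)
{
	/* same 64 bit range, but the old path had refined the low
	 * 32 bits much further than the current one
	 */
	struct bounds old = { 0, 1ULL << 33, 0, 15 };
	struct bounds cur = { 0, 1ULL << 33, 0, 255 };

	printf("64 bit only: %d\n", within_64only(&old, &cur)); /* 1: unsafe prune */
	printf("fixed:       %d\n", within_fixed(&old, &cur));  /* 0: keep going  */
	return 0;
}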
@@ -10999,30 +11003,28 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
-			struct bpf_insn mask_and_div[] = {
-				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+			struct bpf_insn *patchlet;
+			struct bpf_insn chk_and_div[] = {
 				/* Rx div 0 -> 0 */
-				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+					     BPF_JNE | BPF_K, insn->src_reg,
+					     0, 2, 0),
 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
 				*insn,
 			};
-			struct bpf_insn mask_and_mod[] = {
-				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+			struct bpf_insn chk_and_mod[] = {
 				/* Rx mod 0 -> Rx */
-				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+					     BPF_JEQ | BPF_K, insn->src_reg,
+					     0, 1, 0),
 				*insn,
 			};
-			struct bpf_insn *patchlet;
 
-			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
-			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
-				patchlet = mask_and_div + (is64 ? 1 : 0);
-				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
-			} else {
-				patchlet = mask_and_mod + (is64 ? 1 : 0);
-				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
-			}
+			patchlet = isdiv ? chk_and_div : chk_and_mod;
+			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
+				      ARRAY_SIZE(chk_and_mod);
 
 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
 			if (!new_prog)
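
The hunk above fixes the div/mod-by-zero rewrite: the old patchlet prepended
BPF_MOV32_REG(src, src) for 32 bit ops, which zero-extends and thereby
destroys the upper 32 bits of the source register as a visible side effect
for later instructions. The new patchlet instead tests src against zero with
a conditional jump, choosing BPF_JMP32 for 32 bit ALU ops and BPF_JMP for 64
bit ones, leaving src untouched. The runtime semantics being enforced are:
division by zero yields 0, modulo by zero leaves the destination unchanged.
A plain C model of those semantics; function names are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Model of register-operand BPF_DIV/BPF_MOD behavior after the
 * verifier's rewrite: no trap on a zero divisor.
 */
static uint32_t bpf_div32(uint32_t dst, uint32_t src)
{
	return src ? dst / src : 0;	/* Rx div 0 -> 0  */
}

static uint32_t bpf_mod32(uint32_t dst, uint32_t src)
{
	return src ? dst % src : dst;	/* Rx mod 0 -> Rx */
}

int main(void)
{
	printf("%u %u\n", bpf_div32(42, 0), bpf_mod32(42, 0)); /* 0 42 */
	return 0;
}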
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -96,9 +96,6 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 {
 	unsigned int ret;
 
-	if (in_nmi()) /* not supported yet */
-		return 1;
-
 	cant_sleep();
 
 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
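
The bpf_trace.c hunk drops the in_nmi() bail-out: on x86, a kprobe that fires
via the int3 trap now executes in a context where in_nmi() is true, so this
check silently skipped the attached BPF programs. Recursion is still prevented
by the per-CPU bpf_prog_active counter visible in the surrounding context
lines. A rough userspace model of that remaining guard, using _Thread_local
in place of the kernel's per-CPU variable; all names are illustrative:

#include <stdio.h>

/* Each context bumps a counter and only runs the programs when it is
 * the sole active user, so reentrancy (e.g. a kprobe firing inside a
 * BPF program) is skipped rather than recursed into.
 */
static _Thread_local int prog_active;

static unsigned int run_progs(void)
{
	unsigned int ret;

	if (++prog_active != 1) {
		ret = 0;	/* nested invocation: skip the programs */
		goto out;
	}
	ret = 1;		/* ... run the attached BPF programs ... */
out:
	prog_active--;
	return ret;
}

int main(void)
{
	printf("ret = %u\n", run_progs());	/* prints 1 */
	return 0;
}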