Merge branch 'bpf_gso_size'
Willem de Bruijn says:

====================
See first patch for details.

Patch split across three parts { kernel feature, uapi header, tools }
following the custom for such __sk_buff changes.
====================

Acked-by: Petar Penkov <ppenkov@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 5fc40f3739
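Reviewer note, not part of the commit: with gso_size exposed in __sk_buff, a tc (SCHED_CLS) program can read it directly. A minimal sketch follows; the section name, program name, and the 1400-byte threshold are illustrative choices, not taken from this series.

/* Illustrative sketch only: read the new gso_size field from a
 * SCHED_CLS program. gso_size is 0 for non-GSO skbs.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int read_gso_size(struct __sk_buff *skb)
{
	/* e.g. flag GSO skbs whose per-segment payload is undersized */
	if (skb->gso_size && skb->gso_size < 1400)
		return 1;

	return 0;
}

char _license[] SEC("license") = "GPL";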
@@ -3176,6 +3176,7 @@ struct __sk_buff {
 	__u32 wire_len;
 	__u32 gso_segs;
 	__bpf_md_ptr(struct bpf_sock *, sk);
+	__u32 gso_size;
 };
 
 struct bpf_tunnel_key {
@@ -277,6 +277,12 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 	/* gso_segs is allowed */
 
 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
+			   offsetof(struct __sk_buff, gso_size)))
+		return -EINVAL;
+
+	/* gso_size is allowed */
+
+	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
 			   sizeof(struct __sk_buff)))
 		return -EINVAL;
@@ -297,6 +303,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 	if (__skb->gso_segs > GSO_MAX_SEGS)
 		return -EINVAL;
 	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
+	skb_shinfo(skb)->gso_size = __skb->gso_size;
 
 	return 0;
 }
@@ -7139,6 +7139,27 @@ static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
 	return insn - insn_buf;
 }
 
+static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si,
+						  struct bpf_insn *insn)
+{
+	/* si->dst_reg = skb_shinfo(SKB); */
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+			      BPF_REG_AX, si->src_reg,
+			      offsetof(struct sk_buff, end));
+	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
+			      si->dst_reg, si->src_reg,
+			      offsetof(struct sk_buff, head));
+	*insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
+#else
+	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+			      si->dst_reg, si->src_reg,
+			      offsetof(struct sk_buff, end));
+#endif
+
+	return insn;
+}
+
 static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 				  const struct bpf_insn *si,
 				  struct bpf_insn *insn_buf,
@@ -7461,26 +7482,21 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 		break;
 
 	case offsetof(struct __sk_buff, gso_segs):
-		/* si->dst_reg = skb_shinfo(SKB); */
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
-				      BPF_REG_AX, si->src_reg,
-				      offsetof(struct sk_buff, end));
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
-				      si->dst_reg, si->src_reg,
-				      offsetof(struct sk_buff, head));
-		*insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
-#else
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
-				      si->dst_reg, si->src_reg,
-				      offsetof(struct sk_buff, end));
-#endif
+		insn = bpf_convert_shinfo_access(si, insn);
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
 				      si->dst_reg, si->dst_reg,
 				      bpf_target_off(struct skb_shared_info,
 						     gso_segs, 2,
 						     target_size));
 		break;
+	case offsetof(struct __sk_buff, gso_size):
+		insn = bpf_convert_shinfo_access(si, insn);
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_size),
+				      si->dst_reg, si->dst_reg,
+				      bpf_target_off(struct skb_shared_info,
+						     gso_size, 2,
+						     target_size));
+		break;
 	case offsetof(struct __sk_buff, wire_len):
 		BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4);
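Reviewer note, not part of the commit: conceptually, the instructions emitted by bpf_convert_shinfo_access() plus the per-field load compute the same thing as the C sketch below, shown for the NET_SKBUFF_DATA_USES_OFFSET case (where skb->end is an offset from skb->head). The function name is illustrative; the real rewrite operates on BPF instructions, not C.

/* Illustration only: what a BPF read of skb->gso_size resolves to.
 * Without NET_SKBUFF_DATA_USES_OFFSET, skb->end is already a pointer
 * to the shared info area, so the addition is skipped.
 */
#include <linux/skbuff.h>

static unsigned int sketch_read_gso_size(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo;

	/* AX = skb->end; dst = skb->head; dst += AX */
	shinfo = (const struct skb_shared_info *)(skb->head + skb->end);

	/* 2-byte load, matching bpf_target_off(..., gso_size, 2, ...) */
	return shinfo->gso_size;
}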
@@ -3176,6 +3176,7 @@ struct __sk_buff {
 	__u32 wire_len;
 	__u32 gso_segs;
 	__bpf_md_ptr(struct bpf_sock *, sk);
+	__u32 gso_size;
 };
 
 struct bpf_tunnel_key {
@@ -14,6 +14,7 @@ void test_skb_ctx(void)
 		.wire_len = 100,
 		.gso_segs = 8,
 		.mark = 9,
+		.gso_size = 10,
 	};
 	struct bpf_prog_test_run_attr tattr = {
 		.data_in = &pkt_v4,
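Reviewer note, not part of the commit: the selftest drives the program with BPF_PROG_TEST_RUN, passing the __sk_buff above as ctx_in. A hedged userspace sketch using libbpf's bpf_prog_test_run_xattr() is below; the helper name run_with_gso_size is hypothetical, and prog_fd plus the packet buffer are assumed to be set up elsewhere.

/* Sketch only, error handling omitted. */
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int run_with_gso_size(int prog_fd, void *pkt, size_t pkt_len)
{
	struct __sk_buff ctx = {
		.gso_segs = 8,
		.gso_size = 10,		/* new field under test */
	};
	struct bpf_prog_test_run_attr tattr = {
		.prog_fd      = prog_fd,
		.data_in      = pkt,
		.data_size_in = pkt_len,
		.ctx_in       = &ctx,
		.ctx_size_in  = sizeof(ctx),
	};

	return bpf_prog_test_run_xattr(&tattr);
}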
@@ -23,6 +23,8 @@ int process(struct __sk_buff *skb)
 		return 1;
 	if (skb->gso_segs != 8)
 		return 1;
+	if (skb->gso_size != 10)
+		return 1;
 
 	return 0;
 }
@@ -1010,6 +1010,53 @@
 	.result = ACCEPT,
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+{
+	"read gso_size from CGROUP_SKB",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, gso_size)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+	"read gso_size from CGROUP_SKB",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+		    offsetof(struct __sk_buff, gso_size)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+	"write gso_size from CGROUP_SKB",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, gso_size)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.result_unpriv = REJECT,
+	.errstr = "invalid bpf_context access off=176 size=4",
+	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+	"read gso_size from CLS",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, gso_size)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
 {
 	"check wire_len is not readable by sockets",
 	.insns = {