Merge branch 'First set of verifier/*.c migrated to inline assembly'
Eduard Zingerman says:

====================

This is a follow-up for RFC [1]. It migrates a first batch of 38
verifier/*.c tests to inline assembly and use of ./test_progs for actual
execution. The migration is done by a python script (see [2]).

Each migrated verifier/xxx.c file is mapped to progs/verifier_xxx.c plus
an entry in prog_tests/verifier.c, one patch per file.

A few patches at the beginning of the patch-set extend test_loader with
necessary functionality, mainly:

- support for test execution in unprivileged mode;
- support for test runs of test programs.

Migrated tests can be selected for execution using the following filter:

    ./test_progs -a verifier_*

An example of a migrated test:

    SEC("xdp")
    __description("XDP pkt read, pkt_data' > pkt_end, corner case, good access")
    __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
    __naked void end_corner_case_good_access_1(void)
    {
        asm volatile ("                                 \
        r2 = *(u32*)(r1 + %[xdp_md_data]);              \
        r3 = *(u32*)(r1 + %[xdp_md_data_end]);          \
        r1 = r2;                                        \
        r1 += 8;                                        \
        if r1 > r3 goto l0_%=;                          \
        r0 = *(u64*)(r1 - 8);                           \
    l0_%=:  r0 = 0;                                     \
        exit;                                           \
        " :
        : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
          __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
        : __clobber_all);
    }

Changes compared to the RFC:

- test_loader.c is extended to support test program runs;
- capabilities handling now matches the behavior of test_verifier;
- BPF_ST_MEM instructions are automatically replaced by BPF_STX_MEM
  instructions to overcome current clang limitations;
- test styling updated according to RFC feedback;
- 38 migrated files are included instead of 1.

I used the following means for testing:

- the migration tool itself has a set of self-tests;
- the migrated tests are passing;
- manually compared each old/new file side-by-side.

While doing the side-by-side comparison I noted a few defects in the
original tests:

- and.c:
  - one of the jump targets is off by one;
  - BPF_ST_MEM wrong OFF/IMM ordering;
- array_access.c:
  - BPF_ST_MEM wrong OFF/IMM ordering;
- value_or_null.c:
  - BPF_ST_MEM wrong OFF/IMM ordering.

These defects will be addressed separately.

[1] RFC: https://lore.kernel.org/bpf/20230123145148.2791939-1-eddyz87@gmail.com/
[2] Migration tool: https://github.com/eddyz87/verifier-tests-migrator

====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
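A note on the "BPF_ST_MEM wrong OFF/IMM ordering" defect listed above: in
include/linux/filter.h the macro is declared as BPF_ST_MEM(SIZE, DST, OFF, IMM),
so swapping the last two arguments stores the wrong immediate at the wrong
offset. A sketch of the shape of the bug (illustrative values, not the exact
lines from the affected tests):

    /* intended: *(u64 *)(r10 - 8) = 0 */
    BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),

    /* defective ordering: *(u64 *)(r10 + 0) = -8 */
    BPF_ST_MEM(BPF_DW, BPF_REG_10, 0, -8),

Comparing the macro form side-by-side with the generated inline assembly is
exactly what made such swaps visible.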
commit e99360762a
tools/testing/selftests/bpf/Makefile
@@ -231,8 +231,9 @@ TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL)
 
 $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
 
 CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o
 TESTING_HELPERS := $(OUTPUT)/testing_helpers.o
+UNPRIV_HELPERS := $(OUTPUT)/unpriv_helpers.o
 TRACE_HELPERS := $(OUTPUT)/trace_helpers.o
 JSON_WRITER := $(OUTPUT)/json_writer.o
 CAP_HELPERS := $(OUTPUT)/cap_helpers.o
@@ -252,7 +253,7 @@ $(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS)
 $(OUTPUT)/xdping: $(TESTING_HELPERS)
 $(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
 $(OUTPUT)/test_maps: $(TESTING_HELPERS)
-$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS)
+$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS) $(UNPRIV_HELPERS)
 $(OUTPUT)/xsk.o: $(BPFOBJ)
 
 BPFTOOL ?= $(DEFAULT_BPFTOOL)
@@ -560,8 +561,9 @@ TRUNNER_BPF_PROGS_DIR := progs
 TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
 			 network_helpers.c testing_helpers.c \
 			 btf_helpers.c flow_dissector_load.h \
-			 cap_helpers.c test_loader.c xsk.c disasm.c \
-			 json_writer.c
+			 cap_helpers.c test_loader.c xsk.c disasm.c \
+			 json_writer.c unpriv_helpers.c
 TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
 		       $(OUTPUT)/liburandom_read.so \
 		       $(OUTPUT)/xdp_synproxy \
tools/testing/selftests/bpf/autoconf_helper.h (new file, 9 lines)
@@ -0,0 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
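This header factors out a config probe of the kind test_verifier has long done
inline: when the selftests are built against a generated autoconf.h
(HAVE_GENHDR), the real kernel config decides; otherwise efficient unaligned
access is assumed on the listed architectures. A minimal sketch of the intended
consumption (assumed usage; the consuming test_loader code is outside this
excerpt):

    #include "autoconf_helper.h"

    #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    /* expect misaligned stack/packet accesses to be accepted */
    #endif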
tools/testing/selftests/bpf/prog_tests/verifier.c (new file, 104 lines)
@@ -0,0 +1,104 @@
// SPDX-License-Identifier: GPL-2.0-only

#include <test_progs.h>

#include "cap_helpers.h"
#include "verifier_and.skel.h"
#include "verifier_array_access.skel.h"
#include "verifier_basic_stack.skel.h"
#include "verifier_bounds_deduction.skel.h"
#include "verifier_bounds_mix_sign_unsign.skel.h"
#include "verifier_cfg.skel.h"
#include "verifier_cgroup_inv_retcode.skel.h"
#include "verifier_cgroup_skb.skel.h"
#include "verifier_cgroup_storage.skel.h"
#include "verifier_const_or.skel.h"
#include "verifier_ctx_sk_msg.skel.h"
#include "verifier_direct_stack_access_wraparound.skel.h"
#include "verifier_div0.skel.h"
#include "verifier_div_overflow.skel.h"
#include "verifier_helper_access_var_len.skel.h"
#include "verifier_helper_packet_access.skel.h"
#include "verifier_helper_restricted.skel.h"
#include "verifier_helper_value_access.skel.h"
#include "verifier_int_ptr.skel.h"
#include "verifier_ld_ind.skel.h"
#include "verifier_leak_ptr.skel.h"
#include "verifier_map_ptr.skel.h"
#include "verifier_map_ret_val.skel.h"
#include "verifier_masking.skel.h"
#include "verifier_meta_access.skel.h"
#include "verifier_raw_stack.skel.h"
#include "verifier_raw_tp_writable.skel.h"
#include "verifier_ringbuf.skel.h"
#include "verifier_spill_fill.skel.h"
#include "verifier_stack_ptr.skel.h"
#include "verifier_uninit.skel.h"
#include "verifier_value_adj_spill.skel.h"
#include "verifier_value.skel.h"
#include "verifier_value_or_null.skel.h"
#include "verifier_var_off.skel.h"
#include "verifier_xadd.skel.h"
#include "verifier_xdp.skel.h"

__maybe_unused
static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory)
{
	struct test_loader tester = {};
	__u64 old_caps;
	int err;

	/* test_verifier tests are executed w/o CAP_SYS_ADMIN, do the same here */
	err = cap_disable_effective(1ULL << CAP_SYS_ADMIN, &old_caps);
	if (err) {
		PRINT_FAIL("failed to drop CAP_SYS_ADMIN: %i, %s\n", err, strerror(err));
		return;
	}

	test_loader__run_subtests(&tester, skel_name, elf_bytes_factory);
	test_loader_fini(&tester);

	err = cap_enable_effective(old_caps, NULL);
	if (err)
		PRINT_FAIL("failed to restore CAP_SYS_ADMIN: %i, %s\n", err, strerror(err));
}

#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes)

void test_verifier_and(void) { RUN(verifier_and); }
void test_verifier_array_access(void) { RUN(verifier_array_access); }
void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); }
void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); }
void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); }
void test_verifier_cfg(void) { RUN(verifier_cfg); }
void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }
void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); }
void test_verifier_const_or(void) { RUN(verifier_const_or); }
void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); }
void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); }
void test_verifier_div0(void) { RUN(verifier_div0); }
void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); }
void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }
void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }
void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); }
void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); }
void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); }
void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); }
void test_verifier_masking(void) { RUN(verifier_masking); }
void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_uninit(void) { RUN(verifier_uninit); }
void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); }
void test_verifier_value(void) { RUN(verifier_value); }
void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); }
void test_verifier_var_off(void) { RUN(verifier_var_off); }
void test_verifier_xadd(void) { RUN(verifier_xadd); }
void test_verifier_xdp(void) { RUN(verifier_xdp); }
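Each new migration follows the same two-line pattern: include the generated
skeleton and define one test_progs entry point. A sketch for a hypothetical
verifier_foo migration (the name is illustrative, not part of this commit):

    #include "verifier_foo.skel.h"

    void test_verifier_foo(void) { RUN(verifier_foo); }

The RUN() macro passes both the skeleton name (used for test and subtest
naming) and the skeleton's ELF bytes accessor to run_tests_aux(), which drops
CAP_SYS_ADMIN around the run to match test_verifier behavior.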
tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -5,12 +5,42 @@
/* This set of attributes controls behavior of the
 * test_loader.c:test_loader__run_subtests().
 *
 * The test_loader sequentially loads each program in a skeleton.
 * Programs can be loaded in privileged and unprivileged modes.
 * - __success, __failure, __msg imply privileged mode;
 * - __success_unpriv, __failure_unpriv, __msg_unpriv imply
 *   unprivileged mode.
 * If a combination of privileged and unprivileged attributes is present,
 * both modes are used. If none are present, privileged mode is implied.
 *
 * See test_loader.c:drop_capabilities() for the exact set of capabilities
 * that differ between privileged and unprivileged modes.
 *
 * For test filtering purposes the name of the program loaded in
 * unprivileged mode is derived from the usual program name by adding
 * `@unpriv' suffix.
 *
 * __msg             Message expected to be found in the verifier log.
 *                   Multiple __msg attributes may be specified.
 * __msg_unpriv      Same as __msg but for unprivileged mode.
 *
 * __success         Expect program load success in privileged mode.
 * __success_unpriv  Expect program load success in unprivileged mode.
 *
 * __failure         Expect program load failure in privileged mode.
 * __failure_unpriv  Expect program load failure in unprivileged mode.
 *
 * __retval          Execute the program using BPF_PROG_TEST_RUN command,
 *                   expect return value to match passed parameter:
 *                   - a decimal number
 *                   - a hexadecimal number, when it starts with 0x
 *                   - literal INT_MIN
 *                   - literal POINTER_VALUE (see definition below)
 *                   - literal TEST_DATA_LEN (see definition below)
 * __retval_unpriv   Same, but load the program in unprivileged mode.
 *
 * __description     Text to be used instead of a program name for display
 *                   and filtering purposes.
 *
 * __log_level       Log level to use for the program, numeric value expected.
 *
@@ -27,16 +57,28 @@
#define __msg(msg)		__attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
#define __failure		__attribute__((btf_decl_tag("comment:test_expect_failure")))
#define __success		__attribute__((btf_decl_tag("comment:test_expect_success")))
#define __description(desc)	__attribute__((btf_decl_tag("comment:test_description=" desc)))
#define __msg_unpriv(msg)	__attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg)))
#define __failure_unpriv	__attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
#define __success_unpriv	__attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
#define __log_level(lvl)	__attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
#define __flag(flag)		__attribute__((btf_decl_tag("comment:test_prog_flags="#flag)))
#define __retval(val)		__attribute__((btf_decl_tag("comment:test_retval="#val)))
#define __retval_unpriv(val)	__attribute__((btf_decl_tag("comment:test_retval_unpriv="#val)))

/* Convenience macro for use with 'asm volatile' blocks */
#define __naked			__attribute__((naked))
#define __clobber_all		"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "memory"
#define __clobber_common	"r0", "r1", "r2", "r3", "r4", "r5", "memory"
#define __imm(name)		[name]"i"(name)
#define __imm_const(name, expr)	[name]"i"(expr)
#define __imm_addr(name)	[name]"i"(&name)
#define __imm_ptr(name)		[name]"p"(&name)
#define __imm_insn(name, expr)	[name]"i"(*(long *)&(expr))

/* Magic constants used with __retval() */
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#if defined(__TARGET_ARCH_x86)
#define SYSCALL_WRAPPER 1
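The test annotations above are ordinary BTF declaration tags, so the
expectations travel inside the compiled BPF object and are read back by
test_loader. A worked expansion of the macros (a derivation, not extra code
from the commit):

    __description("example") __success __retval(0)

    /* expands to: */
    __attribute__((btf_decl_tag("comment:test_description=" "example")))
    __attribute__((btf_decl_tag("comment:test_expect_success")))
    __attribute__((btf_decl_tag("comment:test_retval=" "0")))

Adjacent string literals are concatenated, so the tags visible in BTF are
"comment:test_description=example" and "comment:test_retval=0".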
tools/testing/selftests/bpf/progs/verifier_and.c (new file, 107 lines)
@@ -0,0 +1,107 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/and.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

SEC("socket")
__description("invalid and of negative number")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void invalid_and_of_negative_number(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u8*)(r0 + 0); \
	r1 &= -4; \
	r1 <<= 2; \
	r0 += r1; \
l0_%=:	r1 = %[test_val_foo]; \
	*(u64*)(r0 + 0) = r1; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid range check")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void invalid_range_check(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u32*)(r0 + 0); \
	r9 = 1; \
	w1 %%= 2; \
	w1 += 1; \
	w9 &= w1; \
	w9 += 1; \
	w9 >>= 1; \
	w3 = 1; \
	w3 -= w9; \
	w3 *= 0x10000000; \
	r0 += r3; \
	*(u32*)(r0 + 0) = r3; \
l0_%=:	r0 = r0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("socket")
__description("check known subreg with unknown reg")
__success __failure_unpriv __msg_unpriv("R1 !read_ok")
__retval(0)
__naked void known_subreg_with_unknown_reg(void)
{
	asm volatile (" \
	call %[bpf_get_prandom_u32]; \
	r0 <<= 32; \
	r0 += 1; \
	r0 &= 0xFFFF1234; \
	/* Upper bits are unknown but AND above masks out 1 zero'ing lower bits */ \
	if w0 < 1 goto l0_%=; \
	r1 = *(u32*)(r1 + 512); \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
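Two pieces of asm-template syntax in the file above are worth calling out. In
BPF assembly the modulo operation is written "w1 %= 2", but '%' is special
inside a GCC/Clang asm template and must be doubled, which is why the source
reads "w1 %%= 2;". The template sequence "%=" (undoubled) expands to a number
unique to each asm instance, which is what keeps labels such as l0_%= from
colliding when many tests land in one object. Finally, the "ll" suffix in
"r1 = %[map_hash_48b] ll;" requests a 64-bit immediate load (the two-slot
ld_imm64 instruction) rather than a 32-bit move, as required for map
addresses. A reduced sketch combining the three (illustrative, not a test
from this commit):

    asm volatile (" \
    r0 = %[forty_two] ll; \
    r0 %%= 10; \
    if r0 == 2 goto l0_%=; \
    r0 = 1; \
    l0_%=: exit; \
    " :
    : __imm_const(forty_two, 42)
    : __clobber_all);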
tools/testing/selftests/bpf/progs/verifier_array_access.c (new file, 529 lines)
@@ -0,0 +1,529 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/array_access.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
	__uint(map_flags, BPF_F_RDONLY_PROG);
} map_array_ro SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
	__uint(map_flags, BPF_F_WRONLY_PROG);
} map_array_wo SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

SEC("socket")
__description("valid map access into an array with a constant")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void an_array_with_a_constant_1(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = %[test_val_foo]; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("valid map access into an array with a register")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_register_1(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = 4; \
	r1 <<= 2; \
	r0 += r1; \
	r1 = %[test_val_foo]; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("valid map access into an array with a variable")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_variable_1(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u32*)(r0 + 0); \
	if r1 >= %[max_entries] goto l0_%=; \
	r1 <<= 2; \
	r0 += r1; \
	r1 = %[test_val_foo]; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("valid map access into an array with a signed variable")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void array_with_a_signed_variable(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u32*)(r0 + 0); \
	if w1 s> 0xffffffff goto l1_%=; \
	w1 = 0; \
l1_%=:	w2 = %[max_entries]; \
	if r2 s> r1 goto l2_%=; \
	w1 = 0; \
l2_%=:	w1 <<= 2; \
	r0 += r1; \
	r1 = %[test_val_foo]; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with a constant")
__failure __msg("invalid access to map value, value_size=48 off=48 size=8")
__failure_unpriv
__naked void an_array_with_a_constant_2(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = %[test_val_foo]; \
	*(u64*)(r0 + %[__imm_0]) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, (MAX_ENTRIES + 1) << 2),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with a register")
__failure __msg("R0 min value is outside of the allowed memory range")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_register_2(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = %[__imm_0]; \
	r1 <<= 2; \
	r0 += r1; \
	r1 = %[test_val_foo]; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, MAX_ENTRIES + 1),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with a variable")
__failure
__msg("R0 unbounded memory access, make sure to bounds check any such access")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_variable_2(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u32*)(r0 + 0); \
	r1 <<= 2; \
	r0 += r1; \
	r1 = %[test_val_foo]; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with no floor check")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void array_with_no_floor_check(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r0 + 0); \
	w2 = %[max_entries]; \
	if r2 s> r1 goto l1_%=; \
	w1 = 0; \
l1_%=:	w1 <<= 2; \
	r0 += r1; \
	r1 = %[test_val_foo]; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with a invalid max check")
__failure __msg("invalid access to map value, value_size=48 off=44 size=8")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void with_a_invalid_max_check_1(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u32*)(r0 + 0); \
	w2 = %[__imm_0]; \
	if r2 > r1 goto l1_%=; \
	w1 = 0; \
l1_%=:	w1 <<= 2; \
	r0 += r1; \
	r1 = %[test_val_foo]; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, MAX_ENTRIES + 1),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("invalid map access into an array with a invalid max check")
__failure __msg("R0 pointer += pointer")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void with_a_invalid_max_check_2(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r8 = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r0 += r8; \
	r0 = *(u32*)(r0 + %[test_val_foo]); \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("socket")
__description("valid read map access into a read-only array 1")
__success __success_unpriv __retval(28)
__naked void a_read_only_array_1_1(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_array_ro] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r0 = *(u32*)(r0 + 0); \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_ro)
	: __clobber_all);
}

SEC("tc")
__description("valid read map access into a read-only array 2")
__success __retval(65507)
__naked void a_read_only_array_2_1(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_array_ro] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = 4; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	r0 &= 0xffff; \
	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_ro)
	: __clobber_all);
}

SEC("socket")
__description("invalid write map access into a read-only array 1")
__failure __msg("write into map forbidden")
__failure_unpriv
__naked void a_read_only_array_1_2(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_array_ro] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = 42; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_ro)
	: __clobber_all);
}

SEC("tc")
__description("invalid write map access into a read-only array 2")
__failure __msg("write into map forbidden")
__naked void a_read_only_array_2_2(void)
{
	asm volatile (" \
	r6 = r1; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_array_ro] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r6; \
	r2 = 0; \
	r3 = r0; \
	r4 = 8; \
	call %[bpf_skb_load_bytes]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_skb_load_bytes),
	  __imm_addr(map_array_ro)
	: __clobber_all);
}

SEC("socket")
__description("valid write map access into a write-only array 1")
__success __success_unpriv __retval(1)
__naked void a_write_only_array_1_1(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_array_wo] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = 42; \
	*(u64*)(r0 + 0) = r1; \
l0_%=:	r0 = 1; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_wo)
	: __clobber_all);
}

SEC("tc")
__description("valid write map access into a write-only array 2")
__success __retval(0)
__naked void a_write_only_array_2_1(void)
{
	asm volatile (" \
	r6 = r1; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_array_wo] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r6; \
	r2 = 0; \
	r3 = r0; \
	r4 = 8; \
	call %[bpf_skb_load_bytes]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_skb_load_bytes),
	  __imm_addr(map_array_wo)
	: __clobber_all);
}

SEC("socket")
__description("invalid read map access into a write-only array 1")
__failure __msg("read from map forbidden")
__failure_unpriv
__naked void a_write_only_array_1_2(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_array_wo] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r0 = *(u64*)(r0 + 0); \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_wo)
	: __clobber_all);
}

SEC("tc")
__description("invalid read map access into a write-only array 2")
__failure __msg("read from map forbidden")
__naked void a_write_only_array_2_2(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_array_wo] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = 4; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_array_wo)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
tools/testing/selftests/bpf/progs/verifier_basic_stack.c (new file, 100 lines)
@@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/basic_stack.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("socket")
__description("stack out of bounds")
__failure __msg("invalid write to stack")
__failure_unpriv
__naked void stack_out_of_bounds(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 + 8) = r1; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("uninitialized stack1")
__failure __msg("invalid indirect read from stack")
__failure_unpriv
__naked void uninitialized_stack1(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("uninitialized stack2")
__failure __msg("invalid read from stack")
__failure_unpriv
__naked void uninitialized_stack2(void)
{
	asm volatile (" \
	r2 = r10; \
	r0 = *(u64*)(r2 - 8); \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("invalid fp arithmetic")
__failure __msg("R1 subtraction from stack pointer")
__failure_unpriv
__naked void invalid_fp_arithmetic(void)
{
	/* If this gets ever changed, make sure JITs can deal with it. */
	asm volatile (" \
	r0 = 0; \
	r1 = r10; \
	r1 -= 8; \
	*(u64*)(r1 + 0) = r0; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("non-invalid fp arithmetic")
__success __success_unpriv __retval(0)
__naked void non_invalid_fp_arithmetic(void)
{
	asm volatile (" \
	r0 = 0; \
	*(u64*)(r10 - 8) = r0; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("misaligned read from stack")
__failure __msg("misaligned stack access")
__failure_unpriv
__naked void misaligned_read_from_stack(void)
{
	asm volatile (" \
	r2 = r10; \
	r0 = *(u64*)(r2 - 4); \
	exit; \
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c (new file, 171 lines)
@@ -0,0 +1,171 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/bounds_deduction.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("check deducing bounds from const, 1")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_1(void)
{
	asm volatile (" \
	r0 = 1; \
	if r0 s>= 1 goto l0_%=; \
l0_%=:	r0 -= r1; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("check deducing bounds from const, 2")
__success __failure_unpriv
__msg_unpriv("R1 has pointer with unsupported alu operation")
__retval(1)
__naked void deducing_bounds_from_const_2(void)
{
	asm volatile (" \
	r0 = 1; \
	if r0 s>= 1 goto l0_%=; \
	exit; \
l0_%=:	if r0 s<= 1 goto l1_%=; \
	exit; \
l1_%=:	r1 -= r0; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("check deducing bounds from const, 3")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_3(void)
{
	asm volatile (" \
	r0 = 0; \
	if r0 s<= 0 goto l0_%=; \
l0_%=:	r0 -= r1; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("check deducing bounds from const, 4")
__success __failure_unpriv
__msg_unpriv("R6 has pointer with unsupported alu operation")
__retval(0)
__naked void deducing_bounds_from_const_4(void)
{
	asm volatile (" \
	r6 = r1; \
	r0 = 0; \
	if r0 s<= 0 goto l0_%=; \
	exit; \
l0_%=:	if r0 s>= 0 goto l1_%=; \
	exit; \
l1_%=:	r6 -= r0; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("check deducing bounds from const, 5")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_5(void)
{
	asm volatile (" \
	r0 = 0; \
	if r0 s>= 1 goto l0_%=; \
	r0 -= r1; \
l0_%=:	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("check deducing bounds from const, 6")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_6(void)
{
	asm volatile (" \
	r0 = 0; \
	if r0 s>= 0 goto l0_%=; \
	exit; \
l0_%=:	r0 -= r1; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("check deducing bounds from const, 7")
__failure __msg("dereference of modified ctx ptr")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void deducing_bounds_from_const_7(void)
{
	asm volatile (" \
	r0 = %[__imm_0]; \
	if r0 s>= 0 goto l0_%=; \
l0_%=:	r1 -= r0; \
	r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
	exit; \
"	:
	: __imm_const(__imm_0, ~0),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

SEC("socket")
__description("check deducing bounds from const, 8")
__failure __msg("negative offset ctx ptr R1 off=-1 disallowed")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void deducing_bounds_from_const_8(void)
{
	asm volatile (" \
	r0 = %[__imm_0]; \
	if r0 s>= 0 goto l0_%=; \
	r1 += r0; \
l0_%=:	r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
	exit; \
"	:
	: __imm_const(__imm_0, ~0),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

SEC("socket")
__description("check deducing bounds from const, 9")
__failure __msg("R0 tried to subtract pointer from scalar")
__msg_unpriv("R1 has pointer with unsupported alu operation")
__naked void deducing_bounds_from_const_9(void)
{
	asm volatile (" \
	r0 = 0; \
	if r0 s>= 0 goto l0_%=; \
l0_%=:	r0 -= r1; \
	exit; \
"	::: __clobber_all);
}

SEC("socket")
__description("check deducing bounds from const, 10")
__failure
__msg("math between ctx pointer and register with unbounded min value is not allowed")
__failure_unpriv
__naked void deducing_bounds_from_const_10(void)
{
	asm volatile (" \
	r0 = 0; \
	if r0 s<= 0 goto l0_%=; \
l0_%=:	/* Marks reg as unknown. */ \
	r0 = -r0; \
	r0 -= r1; \
	exit; \
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c (new file, 554 lines)
@@ -0,0 +1,554 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("socket")
__description("bounds checks mixing signed and unsigned, positive bounds")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_positive_bounds(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = 2; \
	if r2 >= r1 goto l0_%=; \
	if r1 s> 4 goto l0_%=; \
	r0 += r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void checks_mixing_signed_and_unsigned(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -1; \
	if r1 > r2 goto l0_%=; \
	if r1 s> 1 goto l0_%=; \
	r0 += r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 2")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_2(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -1; \
	if r1 > r2 goto l0_%=; \
	r8 = 0; \
	r8 += r1; \
	if r8 s> 1 goto l0_%=; \
	r0 += r8; \
	r0 = 0; \
	*(u8*)(r8 + 0) = r0; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 3")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_3(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -1; \
	if r1 > r2 goto l0_%=; \
	r8 = r1; \
	if r8 s> 1 goto l0_%=; \
	r0 += r8; \
	r0 = 0; \
	*(u8*)(r8 + 0) = r0; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 4")
__success __success_unpriv __retval(0)
__naked void signed_and_unsigned_variant_4(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = 1; \
	r1 &= r2; \
	if r1 s> 1 goto l0_%=; \
	r0 += r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 5")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_5(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -1; \
	if r1 > r2 goto l0_%=; \
	if r1 s> 1 goto l0_%=; \
	r0 += 4; \
	r0 -= r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
	r0 = 0; \
l0_%=:	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 6")
__failure __msg("R4 min value is negative, either use unsigned")
__failure_unpriv
__naked void signed_and_unsigned_variant_6(void)
{
	asm volatile (" \
	r9 = r1; \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = r9; \
	r2 = 0; \
	r3 = r10; \
	r3 += -512; \
	r4 = *(u64*)(r10 - 16); \
	r6 = -1; \
	if r4 > r6 goto l0_%=; \
	if r4 s> 1 goto l0_%=; \
	r4 += 1; \
	r5 = 0; \
	r6 = 0; \
	*(u16*)(r10 - 512) = r6; \
	call %[bpf_skb_load_bytes]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 7")
__success __success_unpriv __retval(0)
__naked void signed_and_unsigned_variant_7(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = %[__imm_0]; \
	if r1 > r2 goto l0_%=; \
	if r1 s> 1 goto l0_%=; \
	r0 += r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__imm_0, 1024 * 1024 * 1024)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 8")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_8(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -1; \
	if r2 > r1 goto l1_%=; \
	r0 = 0; \
	exit; \
l1_%=:	if r1 s> 1 goto l0_%=; \
	r0 += r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 9")
__success __success_unpriv __retval(0)
__naked void signed_and_unsigned_variant_9(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -9223372036854775808ULL ll; \
	if r2 > r1 goto l1_%=; \
	r0 = 0; \
	exit; \
l1_%=:	if r1 s> 1 goto l0_%=; \
	r0 += r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 10")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_10(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = 0; \
	if r2 > r1 goto l1_%=; \
	r0 = 0; \
	exit; \
l1_%=:	if r1 s> 1 goto l0_%=; \
	r0 += r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 11")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_11(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -1; \
	if r2 >= r1 goto l1_%=; \
	/* Dead branch. */ \
	r0 = 0; \
	exit; \
l1_%=:	if r1 s> 1 goto l0_%=; \
	r0 += r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 12")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_12(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -6; \
	if r2 >= r1 goto l1_%=; \
	r0 = 0; \
	exit; \
l1_%=:	if r1 s> 1 goto l0_%=; \
	r0 += r1; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 13")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_13(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = 2; \
	if r2 >= r1 goto l0_%=; \
	r7 = 1; \
	if r7 s> 0 goto l1_%=; \
l0_%=:	r0 = 0; \
	exit; \
l1_%=:	r7 += r1; \
	if r7 s> 4 goto l2_%=; \
	r0 += r7; \
	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l2_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 14")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_14(void)
{
	asm volatile (" \
	r9 = *(u32*)(r1 + %[__sk_buff_mark]); \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -1; \
	r8 = 2; \
	if r9 == 42 goto l1_%=; \
	if r8 s> r1 goto l2_%=; \
l3_%=:	if r1 s> 1 goto l2_%=; \
	r0 += r1; \
l0_%=:	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
l2_%=:	r0 = 0; \
	exit; \
l1_%=:	if r1 > r2 goto l2_%=; \
	goto l3_%=; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

SEC("socket")
__description("bounds checks mixing signed and unsigned, variant 15")
__failure __msg("unbounded min value")
__failure_unpriv
__naked void signed_and_unsigned_variant_15(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_ns]; \
	*(u64*)(r10 - 16) = r0; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = *(u64*)(r10 - 16); \
	r2 = -6; \
	if r2 >= r1 goto l1_%=; \
l0_%=:	r0 = 0; \
	exit; \
l1_%=:	r0 += r1; \
	if r0 > 1 goto l2_%=; \
	r0 = 0; \
	exit; \
l2_%=:	r1 = 0; \
	*(u8*)(r0 + 0) = r1; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
100
tools/testing/selftests/bpf/progs/verifier_cfg.c
Normal file
100
tools/testing/selftests/bpf/progs/verifier_cfg.c
Normal file
@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cfg.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("unreachable")
__failure __msg("unreachable")
__failure_unpriv
__naked void unreachable(void)
{
	asm volatile (" \
	exit; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("unreachable2")
__failure __msg("unreachable")
__failure_unpriv
__naked void unreachable2(void)
{
	asm volatile (" \
	goto l0_%=; \
	goto l0_%=; \
l0_%=:	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("out of range jump")
__failure __msg("jump out of range")
__failure_unpriv
__naked void out_of_range_jump(void)
{
	asm volatile (" \
	goto l0_%=; \
	exit; \
l0_%=: \
	" ::: __clobber_all);
}

SEC("socket")
__description("out of range jump2")
__failure __msg("jump out of range")
__failure_unpriv
__naked void out_of_range_jump2(void)
{
	asm volatile (" \
	goto -2; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("loop (back-edge)")
__failure __msg("unreachable insn 1")
__msg_unpriv("back-edge")
__naked void loop_back_edge(void)
{
	asm volatile (" \
l0_%=:	goto l0_%=; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("loop2 (back-edge)")
__failure __msg("unreachable insn 4")
__msg_unpriv("back-edge")
__naked void loop2_back_edge(void)
{
	asm volatile (" \
l0_%=:	r1 = r0; \
	r2 = r0; \
	r3 = r0; \
	goto l0_%=; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("conditional loop")
__failure __msg("infinite loop detected")
__msg_unpriv("back-edge")
__naked void conditional_loop(void)
{
	asm volatile (" \
	r0 = r1; \
l0_%=:	r2 = r0; \
	r3 = r0; \
	if r1 == 0 goto l0_%=; \
	exit; \
	" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
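The cfg.c tests above target the verifier's control-flow-graph pass, which runs before any state tracking: it rejects jumps that leave the program, instructions that no execution path can reach, and (for unprivileged loaders) back-edges. As a rough illustration of the idea, and emphatically not the kernel's actual check_cfg() implementation, the toy depth-first walk below classifies the same three defects; the insn encoding and every name in it are invented for this sketch.

#include <stdio.h>

enum { OP_EXIT, OP_GOTO, OP_FALLTHRU };     /* toy opcodes, not real BPF */

struct insn { int op; int off; };           /* off: relative jump offset */

static int seen[16];

/* Depth-first walk from insn i; flags bad jumps and back-edges. */
static int walk(const struct insn *prog, int len, int i, int *on_stack)
{
	int ok = 1;

	if (i < 0 || i >= len)
		return printf("jump out of range\n"), 0;
	if (on_stack[i])
		return printf("back-edge to insn %d\n", i), 0;
	if (seen[i])
		return 1;                   /* already fully explored */
	seen[i] = on_stack[i] = 1;
	if (prog[i].op == OP_GOTO)
		ok = walk(prog, len, i + 1 + prog[i].off, on_stack);
	else if (prog[i].op == OP_FALLTHRU)
		ok = walk(prog, len, i + 1, on_stack);
	on_stack[i] = 0;
	return ok;
}

int main(void)
{
	/* mirrors the "unreachable" test: insn 1 can never be visited */
	struct insn prog[] = { { OP_EXIT, 0 }, { OP_EXIT, 0 } };
	int on_stack[16] = {0};
	int i;

	if (walk(prog, 2, 0, on_stack))
		for (i = 0; i < 2; i++)
			if (!seen[i])
				printf("unreachable insn %d\n", i);
	return 0;
}

The "unreachable insn 1"/"unreachable insn 4" numbers in the loop tests fall out of the same kind of walk: anything after an unconditional back-edge is never visited.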
@ -0,0 +1,89 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test1")
__failure __msg("R0 has value (0x0; 0xffffffff)")
__naked void with_invalid_return_code_test1(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + 0); \
	exit; \
	" ::: __clobber_all);
}

SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test2")
__success
__naked void with_invalid_return_code_test2(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + 0); \
	r0 &= 1; \
	exit; \
	" ::: __clobber_all);
}

SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test3")
__failure __msg("R0 has value (0x0; 0x3)")
__naked void with_invalid_return_code_test3(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + 0); \
	r0 &= 3; \
	exit; \
	" ::: __clobber_all);
}

SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test4")
__success
__naked void with_invalid_return_code_test4(void)
{
	asm volatile (" \
	r0 = 1; \
	exit; \
	" ::: __clobber_all);
}

SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test5")
__failure __msg("R0 has value (0x2; 0x0)")
__naked void with_invalid_return_code_test5(void)
{
	asm volatile (" \
	r0 = 2; \
	exit; \
	" ::: __clobber_all);
}

SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test6")
__failure __msg("R0 is not a known value (ctx)")
__naked void with_invalid_return_code_test6(void)
{
	asm volatile (" \
	r0 = r1; \
	exit; \
	" ::: __clobber_all);
}

SEC("cgroup/sock")
__description("bpf_exit with invalid return code. test7")
__failure __msg("R0 has unknown scalar value")
__naked void with_invalid_return_code_test7(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + 0); \
	r2 = *(u32*)(r1 + 4); \
	r0 *= r2; \
	exit; \
	" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
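A note on the expected messages: pairs like "(0x0; 0xffffffff)" are the verifier's tnum ("tracked number") view of R0, a (value; mask) pair in which set mask bits are unknown and every other bit must equal the corresponding bit of value. cgroup/sock programs must return 0 or 1, so test2's "r0 &= 1" yields tnum (0x0; 0x1), possible values {0, 1}, and passes, while test3's "r0 &= 3" yields (0x0; 0x3), possible values {0..3}, and fails. A minimal membership check is sketched below; the struct and helper names are invented here for illustration (the kernel's real type lives in include/linux/tnum.h).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tnum { uint64_t value; uint64_t mask; };  /* mask = unknown bits */

/* x is a possible concrete value of t iff its known bits match t.value */
static bool tnum_contains(struct tnum t, uint64_t x)
{
	return (x & ~t.mask) == t.value;
}

int main(void)
{
	struct tnum r0_and_1 = { 0x0, 0x1 };  /* after "r0 &= 1": {0, 1} */
	struct tnum r0_and_3 = { 0x0, 0x3 };  /* after "r0 &= 3": {0..3} */

	/* 2 is impossible after "&= 1" but still possible after "&= 3",
	 * which is why test2 is accepted and test3 rejected.
	 */
	printf("%d %d\n", tnum_contains(r0_and_1, 2), tnum_contains(r0_and_3, 2));
	return 0;
}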
227	tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c	Normal file
@ -0,0 +1,227 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cgroup_skb.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("cgroup/skb")
__description("direct packet read test#1 for CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=76 size=4")
__retval(0)
__naked void test_1_for_cgroup_skb(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[__sk_buff_data]); \
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r4 = *(u32*)(r1 + %[__sk_buff_len]); \
	r5 = *(u32*)(r1 + %[__sk_buff_pkt_type]); \
	r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
	*(u32*)(r1 + %[__sk_buff_mark]) = r6; \
	r7 = *(u32*)(r1 + %[__sk_buff_queue_mapping]); \
	r8 = *(u32*)(r1 + %[__sk_buff_protocol]); \
	r9 = *(u32*)(r1 + %[__sk_buff_vlan_present]); \
	r0 = r2; \
	r0 += 8; \
	if r0 > r3 goto l0_%=; \
	r0 = *(u8*)(r2 + 0); \
l0_%=:	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
	  __imm_const(__sk_buff_protocol, offsetof(struct __sk_buff, protocol)),
	  __imm_const(__sk_buff_queue_mapping, offsetof(struct __sk_buff, queue_mapping)),
	  __imm_const(__sk_buff_vlan_present, offsetof(struct __sk_buff, vlan_present))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("direct packet read test#2 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_2_for_cgroup_skb(void)
{
	asm volatile (" \
	r4 = *(u32*)(r1 + %[__sk_buff_vlan_tci]); \
	r5 = *(u32*)(r1 + %[__sk_buff_vlan_proto]); \
	r6 = *(u32*)(r1 + %[__sk_buff_priority]); \
	*(u32*)(r1 + %[__sk_buff_priority]) = r6; \
	r7 = *(u32*)(r1 + %[__sk_buff_ingress_ifindex]); \
	r8 = *(u32*)(r1 + %[__sk_buff_tc_index]); \
	r9 = *(u32*)(r1 + %[__sk_buff_hash]); \
	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_hash, offsetof(struct __sk_buff, hash)),
	  __imm_const(__sk_buff_ingress_ifindex, offsetof(struct __sk_buff, ingress_ifindex)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)),
	  __imm_const(__sk_buff_tc_index, offsetof(struct __sk_buff, tc_index)),
	  __imm_const(__sk_buff_vlan_proto, offsetof(struct __sk_buff, vlan_proto)),
	  __imm_const(__sk_buff_vlan_tci, offsetof(struct __sk_buff, vlan_tci))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("direct packet read test#3 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_3_for_cgroup_skb(void)
{
	asm volatile (" \
	r4 = *(u32*)(r1 + %[__sk_buff_cb_0]); \
	r5 = *(u32*)(r1 + %[__sk_buff_cb_1]); \
	r6 = *(u32*)(r1 + %[__sk_buff_cb_2]); \
	r7 = *(u32*)(r1 + %[__sk_buff_cb_3]); \
	r8 = *(u32*)(r1 + %[__sk_buff_cb_4]); \
	r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \
	*(u32*)(r1 + %[__sk_buff_cb_0]) = r4; \
	*(u32*)(r1 + %[__sk_buff_cb_1]) = r5; \
	*(u32*)(r1 + %[__sk_buff_cb_2]) = r6; \
	*(u32*)(r1 + %[__sk_buff_cb_3]) = r7; \
	*(u32*)(r1 + %[__sk_buff_cb_4]) = r8; \
	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])),
	  __imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])),
	  __imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])),
	  __imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])),
	  __imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])),
	  __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("direct packet read test#4 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_4_for_cgroup_skb(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[__sk_buff_family]); \
	r3 = *(u32*)(r1 + %[__sk_buff_remote_ip4]); \
	r4 = *(u32*)(r1 + %[__sk_buff_local_ip4]); \
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_0]); \
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_1]); \
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_2]); \
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_3]); \
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_0]); \
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_1]); \
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_2]); \
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_3]); \
	r7 = *(u32*)(r1 + %[__sk_buff_remote_port]); \
	r8 = *(u32*)(r1 + %[__sk_buff_local_port]); \
	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_family, offsetof(struct __sk_buff, family)),
	  __imm_const(__sk_buff_local_ip4, offsetof(struct __sk_buff, local_ip4)),
	  __imm_const(__sk_buff_local_ip6_0, offsetof(struct __sk_buff, local_ip6[0])),
	  __imm_const(__sk_buff_local_ip6_1, offsetof(struct __sk_buff, local_ip6[1])),
	  __imm_const(__sk_buff_local_ip6_2, offsetof(struct __sk_buff, local_ip6[2])),
	  __imm_const(__sk_buff_local_ip6_3, offsetof(struct __sk_buff, local_ip6[3])),
	  __imm_const(__sk_buff_local_port, offsetof(struct __sk_buff, local_port)),
	  __imm_const(__sk_buff_remote_ip4, offsetof(struct __sk_buff, remote_ip4)),
	  __imm_const(__sk_buff_remote_ip6_0, offsetof(struct __sk_buff, remote_ip6[0])),
	  __imm_const(__sk_buff_remote_ip6_1, offsetof(struct __sk_buff, remote_ip6[1])),
	  __imm_const(__sk_buff_remote_ip6_2, offsetof(struct __sk_buff, remote_ip6[2])),
	  __imm_const(__sk_buff_remote_ip6_3, offsetof(struct __sk_buff, remote_ip6[3])),
	  __imm_const(__sk_buff_remote_port, offsetof(struct __sk_buff, remote_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid access of tc_classid for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_cgroup_skb(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid access of data_meta for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void data_meta_for_cgroup_skb(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[__sk_buff_data_meta]); \
	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid access of flow_keys for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void flow_keys_for_cgroup_skb(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[__sk_buff_flow_keys]); \
	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_flow_keys, offsetof(struct __sk_buff, flow_keys))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid write access to napi_id for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void napi_id_for_cgroup_skb(void)
{
	asm volatile (" \
	r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \
	*(u32*)(r1 + %[__sk_buff_napi_id]) = r9; \
	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("write tstamp from CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=152 size=8")
__retval(0)
__naked void write_tstamp_from_cgroup_skb(void)
{
	asm volatile (" \
	r0 = 0; \
	*(u64*)(r1 + %[__sk_buff_tstamp]) = r0; \
	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("read tstamp from CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void read_tstamp_from_cgroup_skb(void)
{
	asm volatile (" \
	r0 = *(u64*)(r1 + %[__sk_buff_tstamp]); \
	r0 = 0; \
	exit; \
	" :
	: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
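Test#1 above is the canonical direct-packet-access shape: load data and data_end from the context, advance a copy of data, and compare it against data_end before dereferencing. The same pattern in ordinary (non-naked) BPF C looks roughly like the sketch below; the program and variable names are made up for illustration.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/skb")
int pkt_read(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	/* Without this comparison the verifier rejects the load below:
	 * it cannot prove the first 8 bytes exist.
	 */
	if (data + 8 > data_end)
		return 0;

	return *(__u8 *)data & 1;  /* read is now provably in bounds */
}

char _license[] SEC("license") = "GPL";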
308	tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c	Normal file
@ -0,0 +1,308 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cgroup_storage.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__uint(max_entries, 0);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, char[TEST_DATA_LEN]);
} cgroup_storage SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__uint(max_entries, 0);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, char[64]);
} percpu_cgroup_storage SEC(".maps");

SEC("cgroup/skb")
__description("valid cgroup storage access")
__success __success_unpriv __retval(0)
__naked void valid_cgroup_storage_access(void)
{
	asm volatile (" \
	r2 = 0; \
	r1 = %[cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 0); \
	r0 = r1; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(cgroup_storage)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid cgroup storage access 1")
__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage")
__failure_unpriv
__naked void invalid_cgroup_storage_access_1(void)
{
	asm volatile (" \
	r2 = 0; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 0); \
	r0 = r1; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid cgroup storage access 2")
__failure __msg("fd 1 is not pointing to valid bpf_map")
__failure_unpriv
__naked void invalid_cgroup_storage_access_2(void)
{
	asm volatile (" \
	r2 = 0; \
	.8byte %[ld_map_fd]; \
	.8byte 0; \
	call %[bpf_get_local_storage]; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid cgroup storage access 3")
__failure __msg("invalid access to map value, value_size=64 off=256 size=4")
__failure_unpriv
__naked void invalid_cgroup_storage_access_3(void)
{
	asm volatile (" \
	r2 = 0; \
	r1 = %[cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 256); \
	r1 += 1; \
	r0 = 0; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(cgroup_storage)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid cgroup storage access 4")
__failure __msg("invalid access to map value, value_size=64 off=-2 size=4")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void invalid_cgroup_storage_access_4(void)
{
	asm volatile (" \
	r2 = 0; \
	r1 = %[cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 - 2); \
	r0 = r1; \
	r1 += 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(cgroup_storage)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid cgroup storage access 5")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__failure_unpriv
__naked void invalid_cgroup_storage_access_5(void)
{
	asm volatile (" \
	r2 = 7; \
	r1 = %[cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 0); \
	r0 = r1; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(cgroup_storage)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid cgroup storage access 6")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__msg_unpriv("R2 leaks addr into helper function")
__naked void invalid_cgroup_storage_access_6(void)
{
	asm volatile (" \
	r2 = r1; \
	r1 = %[cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 0); \
	r0 = r1; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(cgroup_storage)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("valid per-cpu cgroup storage access")
__success __success_unpriv __retval(0)
__naked void per_cpu_cgroup_storage_access(void)
{
	asm volatile (" \
	r2 = 0; \
	r1 = %[percpu_cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 0); \
	r0 = r1; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(percpu_cgroup_storage)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 1")
__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage")
__failure_unpriv
__naked void cpu_cgroup_storage_access_1(void)
{
	asm volatile (" \
	r2 = 0; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 0); \
	r0 = r1; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 2")
__failure __msg("fd 1 is not pointing to valid bpf_map")
__failure_unpriv
__naked void cpu_cgroup_storage_access_2(void)
{
	asm volatile (" \
	r2 = 0; \
	.8byte %[ld_map_fd]; \
	.8byte 0; \
	call %[bpf_get_local_storage]; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 3")
__failure __msg("invalid access to map value, value_size=64 off=256 size=4")
__failure_unpriv
__naked void cpu_cgroup_storage_access_3(void)
{
	asm volatile (" \
	r2 = 0; \
	r1 = %[percpu_cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 256); \
	r1 += 1; \
	r0 = 0; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(percpu_cgroup_storage)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 4")
__failure __msg("invalid access to map value, value_size=64 off=-2 size=4")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void cpu_cgroup_storage_access_4(void)
{
	asm volatile (" \
	r2 = 0; \
	r1 = %[cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 - 2); \
	r0 = r1; \
	r1 += 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(cgroup_storage)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 5")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__failure_unpriv
__naked void cpu_cgroup_storage_access_5(void)
{
	asm volatile (" \
	r2 = 7; \
	r1 = %[percpu_cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 0); \
	r0 = r1; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(percpu_cgroup_storage)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 6")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__msg_unpriv("R2 leaks addr into helper function")
__naked void cpu_cgroup_storage_access_6(void)
{
	asm volatile (" \
	r2 = r1; \
	r1 = %[percpu_cgroup_storage] ll; \
	call %[bpf_get_local_storage]; \
	r1 = *(u32*)(r0 + 0); \
	r0 = r1; \
	r0 &= 1; \
	exit; \
	" :
	: __imm(bpf_get_local_storage),
	  __imm_addr(percpu_cgroup_storage)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
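For contrast with the __naked tests, a minimal plain-C user of the same helper might look like the sketch below. The map name and counter semantics are invented for illustration; the two properties the tests above pin down are that the map argument must really be a (per-cpu) cgroup storage map and that the flags argument must be zero.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} pkt_counter SEC(".maps");

SEC("cgroup/skb")
int count_packets(struct __sk_buff *skb)
{
	/* Second argument is flags and must be 0, per the
	 * "doesn't support non-zero flags" tests above.
	 */
	__u64 *val = bpf_get_local_storage(&pkt_counter, 0);

	__sync_fetch_and_add(val, 1);
	return 1;
}

char _license[] SEC("license") = "GPL";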
82	tools/testing/selftests/bpf/progs/verifier_const_or.c	Normal file
@ -0,0 +1,82 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/const_or.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("tracepoint")
__description("constant register |= constant should keep constant type")
__success
__naked void constant_should_keep_constant_type(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -48; \
	r2 = 34; \
	r2 |= 13; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
	" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("constant register |= constant should not bypass stack boundary checks")
__failure __msg("invalid indirect access to stack R1 off=-48 size=58")
__naked void not_bypass_stack_boundary_checks_1(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -48; \
	r2 = 34; \
	r2 |= 24; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
	" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("constant register |= constant register should keep constant type")
__success
__naked void register_should_keep_constant_type(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -48; \
	r2 = 34; \
	r4 = 13; \
	r2 |= r4; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
	" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("constant register |= constant register should not bypass stack boundary checks")
__failure __msg("invalid indirect access to stack R1 off=-48 size=58")
__naked void not_bypass_stack_boundary_checks_2(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -48; \
	r2 = 34; \
	r4 = 24; \
	r2 |= r4; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
	" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
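The size arithmetic these four tests rely on: the destination buffer starts at fp-48, so any read size up to 48 bytes stays inside the frame. 34 | 13 = 0b100010 | 0b001101 = 0b101111 = 47, which fits; 34 | 24 = 0b100010 | 0b011000 = 0b111010 = 58, which runs 10 bytes past the frame pointer, hence "size=58" in the expected message. The register variants check that the verifier computes the identical constant whether the OR source is an immediate or a register whose value is known exactly.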
228	tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c	Normal file
@ -0,0 +1,228 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ctx_sk_msg.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("sk_msg")
__description("valid access family in SK_MSG")
__success
__naked void access_family_in_sk_msg(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[sk_msg_md_family]); \
	exit; \
	" :
	: __imm_const(sk_msg_md_family, offsetof(struct sk_msg_md, family))
	: __clobber_all);
}

SEC("sk_msg")
__description("valid access remote_ip4 in SK_MSG")
__success
__naked void remote_ip4_in_sk_msg(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip4]); \
	exit; \
	" :
	: __imm_const(sk_msg_md_remote_ip4, offsetof(struct sk_msg_md, remote_ip4))
	: __clobber_all);
}

SEC("sk_msg")
__description("valid access local_ip4 in SK_MSG")
__success
__naked void local_ip4_in_sk_msg(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip4]); \
	exit; \
	" :
	: __imm_const(sk_msg_md_local_ip4, offsetof(struct sk_msg_md, local_ip4))
	: __clobber_all);
}

SEC("sk_msg")
__description("valid access remote_port in SK_MSG")
__success
__naked void remote_port_in_sk_msg(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[sk_msg_md_remote_port]); \
	exit; \
	" :
	: __imm_const(sk_msg_md_remote_port, offsetof(struct sk_msg_md, remote_port))
	: __clobber_all);
}

SEC("sk_msg")
__description("valid access local_port in SK_MSG")
__success
__naked void local_port_in_sk_msg(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[sk_msg_md_local_port]); \
	exit; \
	" :
	: __imm_const(sk_msg_md_local_port, offsetof(struct sk_msg_md, local_port))
	: __clobber_all);
}

SEC("sk_skb")
__description("valid access remote_ip6 in SK_MSG")
__success
__naked void remote_ip6_in_sk_msg(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_0]); \
	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_1]); \
	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_2]); \
	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_3]); \
	exit; \
	" :
	: __imm_const(sk_msg_md_remote_ip6_0, offsetof(struct sk_msg_md, remote_ip6[0])),
	  __imm_const(sk_msg_md_remote_ip6_1, offsetof(struct sk_msg_md, remote_ip6[1])),
	  __imm_const(sk_msg_md_remote_ip6_2, offsetof(struct sk_msg_md, remote_ip6[2])),
	  __imm_const(sk_msg_md_remote_ip6_3, offsetof(struct sk_msg_md, remote_ip6[3]))
	: __clobber_all);
}

SEC("sk_skb")
__description("valid access local_ip6 in SK_MSG")
__success
__naked void local_ip6_in_sk_msg(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_0]); \
	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_1]); \
	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_2]); \
	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_3]); \
	exit; \
	" :
	: __imm_const(sk_msg_md_local_ip6_0, offsetof(struct sk_msg_md, local_ip6[0])),
	  __imm_const(sk_msg_md_local_ip6_1, offsetof(struct sk_msg_md, local_ip6[1])),
	  __imm_const(sk_msg_md_local_ip6_2, offsetof(struct sk_msg_md, local_ip6[2])),
	  __imm_const(sk_msg_md_local_ip6_3, offsetof(struct sk_msg_md, local_ip6[3]))
	: __clobber_all);
}

SEC("sk_msg")
__description("valid access size in SK_MSG")
__success
__naked void access_size_in_sk_msg(void)
{
	asm volatile (" \
	r0 = *(u32*)(r1 + %[sk_msg_md_size]); \
	exit; \
	" :
	: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
	: __clobber_all);
}

SEC("sk_msg")
__description("invalid 64B read of size in SK_MSG")
__failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void of_size_in_sk_msg(void)
{
	asm volatile (" \
	r2 = *(u64*)(r1 + %[sk_msg_md_size]); \
	exit; \
	" :
	: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
	: __clobber_all);
}

SEC("sk_msg")
__description("invalid read past end of SK_MSG")
__failure __msg("invalid bpf_context access")
__naked void past_end_of_sk_msg(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[__imm_0]); \
	exit; \
	" :
	: __imm_const(__imm_0, offsetof(struct sk_msg_md, size) + 4)
	: __clobber_all);
}

SEC("sk_msg")
__description("invalid read offset in SK_MSG")
__failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void read_offset_in_sk_msg(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[__imm_0]); \
	exit; \
	" :
	: __imm_const(__imm_0, offsetof(struct sk_msg_md, family) + 1)
	: __clobber_all);
}

SEC("sk_msg")
__description("direct packet read for SK_MSG")
__success
__naked void packet_read_for_sk_msg(void)
{
	asm volatile (" \
	r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
	r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
	r0 = r2; \
	r0 += 8; \
	if r0 > r3 goto l0_%=; \
	r0 = *(u8*)(r2 + 0); \
l0_%=:	r0 = 0; \
	exit; \
	" :
	: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
	  __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
	: __clobber_all);
}

SEC("sk_msg")
__description("direct packet write for SK_MSG")
__success
__naked void packet_write_for_sk_msg(void)
{
	asm volatile (" \
	r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
	r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
	r0 = r2; \
	r0 += 8; \
	if r0 > r3 goto l0_%=; \
	*(u8*)(r2 + 0) = r2; \
l0_%=:	r0 = 0; \
	exit; \
	" :
	: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
	  __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
	: __clobber_all);
}

SEC("sk_msg")
__description("overlapping checks for direct packet access SK_MSG")
__success
__naked void direct_packet_access_sk_msg(void)
{
	asm volatile (" \
	r2 = *(u64*)(r1 + %[sk_msg_md_data]); \
	r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \
	r0 = r2; \
	r0 += 8; \
	if r0 > r3 goto l0_%=; \
	r1 = r2; \
	r1 += 6; \
	if r1 > r3 goto l0_%=; \
	r0 = *(u16*)(r2 + 6); \
l0_%=:	r0 = 0; \
	exit; \
	" :
	: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
	  __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
@ -0,0 +1,56 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("direct stack access with 32-bit wraparound. test1")
__failure __msg("fp pointer and 2147483647")
__failure_unpriv
__naked void with_32_bit_wraparound_test1(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += 0x7fffffff; \
	r1 += 0x7fffffff; \
	w0 = 0; \
	*(u8*)(r1 + 0) = r0; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("direct stack access with 32-bit wraparound. test2")
__failure __msg("fp pointer and 1073741823")
__failure_unpriv
__naked void with_32_bit_wraparound_test2(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += 0x3fffffff; \
	r1 += 0x3fffffff; \
	w0 = 0; \
	*(u8*)(r1 + 0) = r0; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("direct stack access with 32-bit wraparound. test3")
__failure __msg("fp pointer offset 1073741822")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void with_32_bit_wraparound_test3(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += 0x1fffffff; \
	r1 += 0x1fffffff; \
	w0 = 0; \
	*(u8*)(r1 + 0) = r0; \
	exit; \
	" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
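What these three tests guard against: pointer arithmetic must be tracked at the full 64-bit width, because the sum of the two immediates wraps when truncated to 32 bits and would then look like a small in-frame offset. A stand-alone C illustration of the truncation:

#include <stdio.h>

int main(void)
{
	unsigned int off32 = 0x7fffffffu + 0x7fffffffu;

	/* 0xfffffffe: read as a signed 32-bit offset this is -2, which
	 * would make fp + off look like a valid in-frame access even
	 * though the true 64-bit sum is ~4 GiB past the stack.
	 */
	printf("0x%x (%d)\n", off32, (int)off32);
	return 0;
}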
213	tools/testing/selftests/bpf/progs/verifier_div0.c	Normal file
@ -0,0 +1,213 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/div0.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("DIV32 by 0, zero check 1")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_1_1(void)
{
	asm volatile (" \
	w0 = 42; \
	w1 = 0; \
	w2 = 1; \
	w2 /= w1; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("DIV32 by 0, zero check 2")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_2_1(void)
{
	asm volatile (" \
	w0 = 42; \
	r1 = 0xffffffff00000000LL ll; \
	w2 = 1; \
	w2 /= w1; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("DIV64 by 0, zero check")
__success __success_unpriv __retval(42)
__naked void div64_by_0_zero_check(void)
{
	asm volatile (" \
	w0 = 42; \
	w1 = 0; \
	w2 = 1; \
	r2 /= r1; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("MOD32 by 0, zero check 1")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_1_2(void)
{
	asm volatile (" \
	w0 = 42; \
	w1 = 0; \
	w2 = 1; \
	w2 %%= w1; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("MOD32 by 0, zero check 2")
__success __success_unpriv __retval(42)
__naked void by_0_zero_check_2_2(void)
{
	asm volatile (" \
	w0 = 42; \
	r1 = 0xffffffff00000000LL ll; \
	w2 = 1; \
	w2 %%= w1; \
	exit; \
	" ::: __clobber_all);
}

SEC("socket")
__description("MOD64 by 0, zero check")
__success __success_unpriv __retval(42)
__naked void mod64_by_0_zero_check(void)
{
	asm volatile (" \
	w0 = 42; \
	w1 = 0; \
	w2 = 1; \
	r2 %%= r1; \
	exit; \
	" ::: __clobber_all);
}

SEC("tc")
__description("DIV32 by 0, zero check ok, cls")
__success __retval(8)
__naked void _0_zero_check_ok_cls_1(void)
{
	asm volatile (" \
	w0 = 42; \
	w1 = 2; \
	w2 = 16; \
	w2 /= w1; \
	r0 = r2; \
	exit; \
	" ::: __clobber_all);
}

SEC("tc")
__description("DIV32 by 0, zero check 1, cls")
__success __retval(0)
__naked void _0_zero_check_1_cls_1(void)
{
	asm volatile (" \
	w1 = 0; \
	w0 = 1; \
	w0 /= w1; \
	exit; \
	" ::: __clobber_all);
}

SEC("tc")
__description("DIV32 by 0, zero check 2, cls")
__success __retval(0)
__naked void _0_zero_check_2_cls_1(void)
{
	asm volatile (" \
	r1 = 0xffffffff00000000LL ll; \
	w0 = 1; \
	w0 /= w1; \
	exit; \
	" ::: __clobber_all);
}

SEC("tc")
__description("DIV64 by 0, zero check, cls")
__success __retval(0)
__naked void by_0_zero_check_cls(void)
{
	asm volatile (" \
	w1 = 0; \
	w0 = 1; \
	r0 /= r1; \
	exit; \
	" ::: __clobber_all);
}

SEC("tc")
__description("MOD32 by 0, zero check ok, cls")
__success __retval(2)
__naked void _0_zero_check_ok_cls_2(void)
{
	asm volatile (" \
	w0 = 42; \
	w1 = 3; \
	w2 = 5; \
	w2 %%= w1; \
	r0 = r2; \
	exit; \
	" ::: __clobber_all);
}

SEC("tc")
__description("MOD32 by 0, zero check 1, cls")
__success __retval(1)
__naked void _0_zero_check_1_cls_2(void)
{
	asm volatile (" \
	w1 = 0; \
	w0 = 1; \
	w0 %%= w1; \
	exit; \
	" ::: __clobber_all);
}

SEC("tc")
__description("MOD32 by 0, zero check 2, cls")
__success __retval(1)
__naked void _0_zero_check_2_cls_2(void)
{
	asm volatile (" \
	r1 = 0xffffffff00000000LL ll; \
	w0 = 1; \
	w0 %%= w1; \
	exit; \
	" ::: __clobber_all);
}

SEC("tc")
__description("MOD64 by 0, zero check 1, cls")
__success __retval(2)
__naked void _0_zero_check_1_cls_3(void)
{
	asm volatile (" \
	w1 = 0; \
	w0 = 2; \
	r0 %%= r1; \
	exit; \
	" ::: __clobber_all);
}

SEC("tc")
__description("MOD64 by 0, zero check 2, cls")
__success __retval(-1)
__naked void _0_zero_check_2_cls_3(void)
{
	asm volatile (" \
	w1 = 0; \
	w0 = -1; \
	r0 %%= r1; \
	exit; \
	" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
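The __retval annotations above encode eBPF's defined divide-by-zero semantics: a division by zero produces 0, while a modulo by zero leaves the destination operand unchanged (so "w0 = 1; w0 %= 0" still returns 1). A plain-C sketch of those rules as the tests pin them down; this is an illustration, not kernel code:

#include <stdio.h>

/* Sketch of the runtime behavior the div0 tests assert. */
static unsigned int bpf_div32(unsigned int dst, unsigned int src)
{
	return src ? dst / src : 0;    /* x / 0 == 0 */
}

static unsigned int bpf_mod32(unsigned int dst, unsigned int src)
{
	return src ? dst % src : dst;  /* x % 0 == x, dst unchanged */
}

int main(void)
{
	printf("%u %u\n", bpf_div32(1, 0), bpf_mod32(1, 0)); /* 0 1 */
	return 0;
}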
144	tools/testing/selftests/bpf/progs/verifier_div_overflow.c	Normal file
@ -0,0 +1,144 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/div_overflow.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <limits.h>
#include "bpf_misc.h"

/* Just make sure that JITs used udiv/umod as otherwise we get
 * an exception from INT_MIN/-1 overflow similarly as with div
 * by zero.
 */

SEC("tc")
__description("DIV32 overflow, check 1")
__success __retval(0)
__naked void div32_overflow_check_1(void)
{
	asm volatile (" \
	w1 = -1; \
	w0 = %[int_min]; \
	w0 /= w1; \
	exit; \
	" :
	: __imm_const(int_min, INT_MIN)
	: __clobber_all);
}

SEC("tc")
__description("DIV32 overflow, check 2")
__success __retval(0)
__naked void div32_overflow_check_2(void)
{
	asm volatile (" \
	w0 = %[int_min]; \
	w0 /= -1; \
	exit; \
	" :
	: __imm_const(int_min, INT_MIN)
	: __clobber_all);
}

SEC("tc")
__description("DIV64 overflow, check 1")
__success __retval(0)
__naked void div64_overflow_check_1(void)
{
	asm volatile (" \
	r1 = -1; \
	r2 = %[llong_min] ll; \
	r2 /= r1; \
	w0 = 0; \
	if r0 == r2 goto l0_%=; \
	w0 = 1; \
l0_%=:	exit; \
	" :
	: __imm_const(llong_min, LLONG_MIN)
	: __clobber_all);
}

SEC("tc")
__description("DIV64 overflow, check 2")
__success __retval(0)
__naked void div64_overflow_check_2(void)
{
	asm volatile (" \
	r1 = %[llong_min] ll; \
	r1 /= -1; \
	w0 = 0; \
	if r0 == r1 goto l0_%=; \
	w0 = 1; \
l0_%=:	exit; \
	" :
	: __imm_const(llong_min, LLONG_MIN)
	: __clobber_all);
}

SEC("tc")
__description("MOD32 overflow, check 1")
__success __retval(INT_MIN)
__naked void mod32_overflow_check_1(void)
{
	asm volatile (" \
	w1 = -1; \
	w0 = %[int_min]; \
	w0 %%= w1; \
	exit; \
	" :
	: __imm_const(int_min, INT_MIN)
	: __clobber_all);
}

SEC("tc")
__description("MOD32 overflow, check 2")
__success __retval(INT_MIN)
__naked void mod32_overflow_check_2(void)
{
	asm volatile (" \
	w0 = %[int_min]; \
	w0 %%= -1; \
	exit; \
	" :
	: __imm_const(int_min, INT_MIN)
	: __clobber_all);
}

SEC("tc")
__description("MOD64 overflow, check 1")
__success __retval(1)
__naked void mod64_overflow_check_1(void)
{
	asm volatile (" \
	r1 = -1; \
	r2 = %[llong_min] ll; \
	r3 = r2; \
	r2 %%= r1; \
	w0 = 0; \
	if r3 != r2 goto l0_%=; \
	w0 = 1; \
l0_%=:	exit; \
	" :
	: __imm_const(llong_min, LLONG_MIN)
	: __clobber_all);
}

SEC("tc")
__description("MOD64 overflow, check 2")
__success __retval(1)
__naked void mod64_overflow_check_2(void)
{
	asm volatile (" \
	r2 = %[llong_min] ll; \
	r3 = r2; \
	r2 %%= -1; \
	w0 = 0; \
	if r3 != r2 goto l0_%=; \
	w0 = 1; \
l0_%=:	exit; \
	" :
	: __imm_const(llong_min, LLONG_MIN)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
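Because BPF DIV and MOD are unsigned, INT_MIN / -1 cannot trap the way a native signed division instruction does: the -1 divisor is the all-ones unsigned value, so the quotient is 0 and the remainder is the dividend, which is exactly what the __retval expectations above assert. The same arithmetic in stand-alone C:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long long a = (unsigned long long)LLONG_MIN; /* 0x8000... */
	unsigned long long b = (unsigned long long)-1LL;      /* 0xffff... */

	printf("%llu\n", a / b);   /* 0: what BPF DIV64 computes */
	printf("%#llx\n", a % b);  /* 0x8000000000000000: dividend kept */
	return 0;
}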
@ -0,0 +1,825 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_access_var_len.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

SEC("tracepoint")
__description("helper access to variable memory: stack, bitwise AND + JMP, correct bounds")
__success
__naked void bitwise_and_jmp_correct_bounds(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -64; \
	r0 = 0; \
	*(u64*)(r10 - 64) = r0; \
	*(u64*)(r10 - 56) = r0; \
	*(u64*)(r10 - 48) = r0; \
	*(u64*)(r10 - 40) = r0; \
	*(u64*)(r10 - 32) = r0; \
	*(u64*)(r10 - 24) = r0; \
	*(u64*)(r10 - 16) = r0; \
	*(u64*)(r10 - 8) = r0; \
	r2 = 16; \
	*(u64*)(r1 - 128) = r2; \
	r2 = *(u64*)(r1 - 128); \
	r2 &= 64; \
	r4 = 0; \
	if r4 >= r2 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	r0 = 0; \
	exit; \
	" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

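The "&= 64" narrowing in the test above works because 64 is a single-bit constant: after the AND, r2 can only be 0 or 64, so its unsigned maximum is 64, and the "if r4 >= r2" guard with r4 = 0 removes the zero case on the fall-through path. That leaves the verifier a provable non-zero, upper-bounded size for bpf_probe_read_kernel()'s length argument, matching the 64-byte buffer at fp-64; the "wrong max" variants below show that 65 in the same position is rejected.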
SEC("socket")
|
||||
__description("helper access to variable memory: stack, bitwise AND, zero included")
|
||||
/* in privileged mode reads from uninitialized stack locations are permitted */
|
||||
__success __failure_unpriv
|
||||
__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
|
||||
__retval(0)
|
||||
__naked void stack_bitwise_and_zero_included(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
/* set max stack size */ \
|
||||
r6 = 0; \
|
||||
*(u64*)(r10 - 128) = r6; \
|
||||
/* set r3 to a random value */ \
|
||||
call %[bpf_get_prandom_u32]; \
|
||||
r3 = r0; \
|
||||
/* use bitwise AND to limit r3 range to [0, 64] */\
|
||||
r3 &= 64; \
|
||||
r1 = %[map_ringbuf] ll; \
|
||||
r2 = r10; \
|
||||
r2 += -64; \
|
||||
r4 = 0; \
|
||||
/* Call bpf_ringbuf_output(), it is one of a few helper functions with\
|
||||
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
|
||||
* For unpriv this should signal an error, because memory at &fp[-64] is\
|
||||
* not initialized. \
|
||||
*/ \
|
||||
call %[bpf_ringbuf_output]; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm(bpf_get_prandom_u32),
|
||||
__imm(bpf_ringbuf_output),
|
||||
__imm_addr(map_ringbuf)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("tracepoint")
|
||||
__description("helper access to variable memory: stack, bitwise AND + JMP, wrong max")
|
||||
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
|
||||
__naked void bitwise_and_jmp_wrong_max(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r2 = *(u64*)(r1 + 8); \
|
||||
r1 = r10; \
|
||||
r1 += -64; \
|
||||
*(u64*)(r1 - 128) = r2; \
|
||||
r2 = *(u64*)(r1 - 128); \
|
||||
r2 &= 65; \
|
||||
r4 = 0; \
|
||||
if r4 >= r2 goto l0_%=; \
|
||||
r3 = 0; \
|
||||
call %[bpf_probe_read_kernel]; \
|
||||
l0_%=: r0 = 0; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm(bpf_probe_read_kernel)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("tracepoint")
|
||||
__description("helper access to variable memory: stack, JMP, correct bounds")
|
||||
__success
|
||||
__naked void memory_stack_jmp_correct_bounds(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -64; \
|
||||
r0 = 0; \
|
||||
*(u64*)(r10 - 64) = r0; \
|
||||
*(u64*)(r10 - 56) = r0; \
|
||||
*(u64*)(r10 - 48) = r0; \
|
||||
*(u64*)(r10 - 40) = r0; \
|
||||
*(u64*)(r10 - 32) = r0; \
|
||||
*(u64*)(r10 - 24) = r0; \
|
||||
*(u64*)(r10 - 16) = r0; \
|
||||
*(u64*)(r10 - 8) = r0; \
|
||||
r2 = 16; \
|
||||
*(u64*)(r1 - 128) = r2; \
|
||||
r2 = *(u64*)(r1 - 128); \
|
||||
if r2 > 64 goto l0_%=; \
|
||||
r4 = 0; \
|
||||
if r4 >= r2 goto l0_%=; \
|
||||
r3 = 0; \
|
||||
call %[bpf_probe_read_kernel]; \
|
||||
l0_%=: r0 = 0; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm(bpf_probe_read_kernel)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("tracepoint")
|
||||
__description("helper access to variable memory: stack, JMP (signed), correct bounds")
|
||||
__success
|
||||
__naked void stack_jmp_signed_correct_bounds(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -64; \
|
||||
r0 = 0; \
|
||||
*(u64*)(r10 - 64) = r0; \
|
||||
*(u64*)(r10 - 56) = r0; \
|
||||
*(u64*)(r10 - 48) = r0; \
|
||||
*(u64*)(r10 - 40) = r0; \
|
||||
*(u64*)(r10 - 32) = r0; \
|
||||
*(u64*)(r10 - 24) = r0; \
|
||||
*(u64*)(r10 - 16) = r0; \
|
||||
*(u64*)(r10 - 8) = r0; \
|
||||
r2 = 16; \
|
||||
*(u64*)(r1 - 128) = r2; \
|
||||
r2 = *(u64*)(r1 - 128); \
|
||||
if r2 s> 64 goto l0_%=; \
|
||||
r4 = 0; \
|
||||
if r4 s>= r2 goto l0_%=; \
|
||||
r3 = 0; \
|
||||
call %[bpf_probe_read_kernel]; \
|
||||
l0_%=: r0 = 0; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm(bpf_probe_read_kernel)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("tracepoint")
|
||||
__description("helper access to variable memory: stack, JMP, bounds + offset")
|
||||
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
|
||||
__naked void memory_stack_jmp_bounds_offset(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r2 = *(u64*)(r1 + 8); \
|
||||
r1 = r10; \
|
||||
r1 += -64; \
|
||||
*(u64*)(r1 - 128) = r2; \
|
||||
r2 = *(u64*)(r1 - 128); \
|
||||
if r2 > 64 goto l0_%=; \
|
||||
r4 = 0; \
|
||||
if r4 >= r2 goto l0_%=; \
|
||||
r2 += 1; \
|
||||
r3 = 0; \
|
||||
call %[bpf_probe_read_kernel]; \
|
||||
l0_%=: r0 = 0; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm(bpf_probe_read_kernel)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("tracepoint")
|
||||
__description("helper access to variable memory: stack, JMP, wrong max")
|
||||
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
|
||||
__naked void memory_stack_jmp_wrong_max(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r2 = *(u64*)(r1 + 8); \
|
||||
r1 = r10; \
|
||||
r1 += -64; \
|
||||
*(u64*)(r1 - 128) = r2; \
|
||||
r2 = *(u64*)(r1 - 128); \
|
||||
if r2 > 65 goto l0_%=; \
|
||||
r4 = 0; \
|
||||
if r4 >= r2 goto l0_%=; \
|
||||
r3 = 0; \
|
||||
call %[bpf_probe_read_kernel]; \
|
||||
l0_%=: r0 = 0; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm(bpf_probe_read_kernel)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("tracepoint")
|
||||
__description("helper access to variable memory: stack, JMP, no max check")
|
||||
__failure
|
||||
/* because max wasn't checked, signed min is negative */
|
||||
__msg("R2 min value is negative, either use unsigned or 'var &= const'")
|
||||
__naked void stack_jmp_no_max_check(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r2 = *(u64*)(r1 + 8); \
|
||||
r1 = r10; \
|
||||
r1 += -64; \
|
||||
*(u64*)(r1 - 128) = r2; \
|
||||
r2 = *(u64*)(r1 - 128); \
|
||||
r4 = 0; \
|
||||
if r4 >= r2 goto l0_%=; \
|
||||
r3 = 0; \
|
||||
call %[bpf_probe_read_kernel]; \
|
||||
l0_%=: r0 = 0; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm(bpf_probe_read_kernel)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("helper access to variable memory: stack, JMP, no min check")
|
||||
/* in privileged mode reads from uninitialized stack locations are permitted */
|
||||
__success __failure_unpriv
|
||||
__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
|
||||
__retval(0)
|
||||
__naked void stack_jmp_no_min_check(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
/* set max stack size */ \
|
||||
r6 = 0; \
|
||||
*(u64*)(r10 - 128) = r6; \
|
||||
/* set r3 to a random value */ \
|
||||
call %[bpf_get_prandom_u32]; \
|
||||
r3 = r0; \
|
||||
/* use JMP to limit r3 range to [0, 64] */ \
|
||||
if r3 > 64 goto l0_%=; \
|
||||
r1 = %[map_ringbuf] ll; \
|
||||
r2 = r10; \
|
||||
r2 += -64; \
|
||||
r4 = 0; \
|
||||
/* Call bpf_ringbuf_output(), it is one of a few helper functions with\
|
||||
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
|
||||
* For unpriv this should signal an error, because memory at &fp[-64] is\
|
||||
* not initialized. \
|
||||
*/ \
|
||||
call %[bpf_ringbuf_output]; \
|
||||
l0_%=: r0 = 0; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm(bpf_get_prandom_u32),
|
||||
__imm(bpf_ringbuf_output),
|
||||
__imm_addr(map_ringbuf)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("tracepoint")
|
||||
__description("helper access to variable memory: stack, JMP (signed), no min check")
|
||||
__failure __msg("R2 min value is negative")
|
||||
__naked void jmp_signed_no_min_check(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r2 = *(u64*)(r1 + 8); \
|
||||
r1 = r10; \
|
||||
r1 += -64; \
|
||||
*(u64*)(r1 - 128) = r2; \
|
||||
r2 = *(u64*)(r1 - 128); \
|
||||
if r2 s> 64 goto l0_%=; \
|
||||
r3 = 0; \
|
||||
call %[bpf_probe_read_kernel]; \
|
||||
r0 = 0; \
|
||||
l0_%=: exit; \
|
||||
" :
|
||||
: __imm(bpf_probe_read_kernel)
|
||||
: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: map, JMP, correct bounds")
__success
__naked void memory_map_jmp_correct_bounds(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = %[sizeof_test_val]; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	if r2 s> %[sizeof_test_val] goto l1_%=; \
	r4 = 0; \
	if r4 s>= r2 goto l1_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l1_%=:	r0 = 0; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(sizeof_test_val, sizeof(struct test_val))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: map, JMP, wrong max")
__failure __msg("invalid access to map value, value_size=48 off=0 size=49")
__naked void memory_map_jmp_wrong_max(void)
{
	asm volatile (" \
	r6 = *(u64*)(r1 + 8); \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = r6; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	if r2 s> %[__imm_0] goto l1_%=; \
	r4 = 0; \
	if r4 s>= r2 goto l1_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l1_%=:	r0 = 0; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) + 1)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: map adjusted, JMP, correct bounds")
__success
__naked void map_adjusted_jmp_correct_bounds(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r1 += 20; \
	r2 = %[sizeof_test_val]; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	if r2 s> %[__imm_0] goto l1_%=; \
	r4 = 0; \
	if r4 s>= r2 goto l1_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l1_%=:	r0 = 0; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - 20),
	  __imm_const(sizeof_test_val, sizeof(struct test_val))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: map adjusted, JMP, wrong max")
__failure __msg("R1 min value is outside of the allowed memory range")
__naked void map_adjusted_jmp_wrong_max(void)
{
	asm volatile (" \
	r6 = *(u64*)(r1 + 8); \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r1 += 20; \
	r2 = r6; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	if r2 s> %[__imm_0] goto l1_%=; \
	r4 = 0; \
	if r4 s>= r2 goto l1_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l1_%=:	r0 = 0; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - 19)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_1(void)
{
	asm volatile (" \
	r1 = 0; \
	r2 = 0; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
	exit; \
"	:
	: __imm(bpf_csum_diff)
	: __clobber_all);
}
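
/* A C-level sketch (not a migrated test; hypothetical prog name) of the
 * call the five zeroed registers above set up. bpf_csum_diff()'s from/to
 * arguments are ARG_PTR_TO_MEM_OR_NULL, so a NULL pointer is accepted as
 * long as the paired size is provably zero, which is what this test checks.
 */
SEC("tc")
int csum_diff_null_sketch(struct __sk_buff *skb)
{
	/* s64 bpf_csum_diff(__be32 *from, u32 from_size,
	 *                   __be32 *to, u32 to_size, __wsum seed) */
	return bpf_csum_diff(NULL, 0, NULL, 0, 0);
}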

SEC("tc")
__description("helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_2(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + 0); \
	r1 = 0; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	r2 &= 64; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
	exit; \
"	:
	: __imm(bpf_csum_diff)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_3(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -8; \
	r2 = 0; \
	*(u64*)(r1 + 0) = r2; \
	r2 &= 8; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
	exit; \
"	:
	: __imm(bpf_csum_diff)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_4(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = 0; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_5(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r2 = *(u64*)(r0 + 0); \
	if r2 > 8 goto l0_%=; \
	r1 = r10; \
	r1 += -8; \
	*(u64*)(r1 + 0) = r2; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_6(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = *(u64*)(r0 + 0); \
	if r2 > 8 goto l0_%=; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
/* csum_diff of 64-byte packet */
__flag(BPF_F_ANY_ALIGNMENT)
__naked void ptr_to_mem_or_null_7(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r0 = r6; \
	r0 += 8; \
	if r0 > r3 goto l0_%=; \
	r1 = r6; \
	r2 = *(u64*)(r6 + 0); \
	if r2 > 8 goto l0_%=; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_8(void)
{
	asm volatile (" \
	r1 = 0; \
	r2 = 0; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_9(void)
{
	asm volatile (" \
	r1 = 0; \
	r2 = 1; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_10(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -8; \
	r2 = 0; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_11(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = 0; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_12(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r2 = *(u64*)(r0 + 0); \
	if r2 > 8 goto l0_%=; \
	r1 = r10; \
	r1 += -8; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_13(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = *(u64*)(r0 + 0); \
	if r2 > 8 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("helper access to variable memory: 8 bytes leak")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64")
__retval(0)
__naked void variable_memory_8_bytes_leak(void)
{
	asm volatile (" \
	/* set max stack size */ \
	r6 = 0; \
	*(u64*)(r10 - 128) = r6; \
	/* set r3 to a random value */ \
	call %[bpf_get_prandom_u32]; \
	r3 = r0; \
	r1 = %[map_ringbuf] ll; \
	r2 = r10; \
	r2 += -64; \
	r0 = 0; \
	*(u64*)(r10 - 64) = r0; \
	*(u64*)(r10 - 56) = r0; \
	*(u64*)(r10 - 48) = r0; \
	*(u64*)(r10 - 40) = r0; \
	/* Note: fp[-32] left uninitialized */ \
	*(u64*)(r10 - 24) = r0; \
	*(u64*)(r10 - 16) = r0; \
	*(u64*)(r10 - 8) = r0; \
	/* Limit r3 range to [1, 64] */ \
	r3 &= 63; \
	r3 += 1; \
	r4 = 0; \
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with\
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
	 * For unpriv this should signal an error, because memory region [1, 64]\
	 * at &fp[-64] is not fully initialized. \
	 */ \
	call %[bpf_ringbuf_output]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_ringbuf_output),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
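
/* For contrast, a C-level sketch of the same call with the region fully
 * initialized (hypothetical prog name; the test above needs naked asm so
 * that fp[-32] can be left uninitialized on purpose). */
SEC("socket")
int ringbuf_output_sketch(void *ctx)
{
	char buf[64] = {};				/* fully initialized */
	__u64 len = (bpf_get_prandom_u32() & 63) + 1;	/* 1..64 */

	/* variable size into an initialized stack region: accepted */
	bpf_ringbuf_output(&map_ringbuf, buf, len, 0);
	return 0;
}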

SEC("tracepoint")
__description("helper access to variable memory: 8 bytes no leak (init memory)")
__success
__naked void bytes_no_leak_init_memory(void)
{
	asm volatile (" \
	r1 = r10; \
	r0 = 0; \
	r0 = 0; \
	*(u64*)(r10 - 64) = r0; \
	*(u64*)(r10 - 56) = r0; \
	*(u64*)(r10 - 48) = r0; \
	*(u64*)(r10 - 40) = r0; \
	*(u64*)(r10 - 32) = r0; \
	*(u64*)(r10 - 24) = r0; \
	*(u64*)(r10 - 16) = r0; \
	*(u64*)(r10 - 8) = r0; \
	r1 += -64; \
	r2 = 0; \
	r2 &= 32; \
	r2 += 32; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	r1 = *(u64*)(r10 - 16); \
	exit; \
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";

550	tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c	Normal file
@ -0,0 +1,550 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_packet_access.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("xdp")
__description("helper access to packet: test1, valid packet_ptr range")
__success __retval(0)
__naked void test1_valid_packet_ptr_range(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[xdp_md_data]); \
	r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
	r1 = r2; \
	r1 += 8; \
	if r1 > r3 goto l0_%=; \
	r1 = %[map_hash_8b] ll; \
	r3 = r2; \
	r4 = 0; \
	call %[bpf_map_update_elem]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}
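
/* The open-coded bounds check above is the standard packet-access idiom;
 * a plain-C sketch of the same program (hypothetical prog name): */
SEC("xdp")
int pkt_bounds_sketch(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 8 > data_end)	/* same test as 'if r1 > r3' above */
		return 0;
	/* the first 8 bytes are now provably in bounds; use them as key */
	bpf_map_update_elem(&map_hash_8b, data, data, 0);
	return 0;
}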

SEC("xdp")
__description("helper access to packet: test2, unchecked packet_ptr")
__failure __msg("invalid access to packet")
__naked void packet_test2_unchecked_packet_ptr(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[xdp_md_data]); \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
	: __clobber_all);
}

SEC("xdp")
__description("helper access to packet: test3, variable add")
__success __retval(0)
__naked void to_packet_test3_variable_add(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[xdp_md_data]); \
	r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
	r4 = r2; \
	r4 += 8; \
	if r4 > r3 goto l0_%=; \
	r5 = *(u8*)(r2 + 0); \
	r4 = r2; \
	r4 += r5; \
	r5 = r4; \
	r5 += 8; \
	if r5 > r3 goto l0_%=; \
	r1 = %[map_hash_8b] ll; \
	r2 = r4; \
	call %[bpf_map_lookup_elem]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

SEC("xdp")
__description("helper access to packet: test4, packet_ptr with bad range")
__failure __msg("invalid access to packet")
__naked void packet_ptr_with_bad_range_1(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[xdp_md_data]); \
	r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
	r4 = r2; \
	r4 += 4; \
	if r4 > r3 goto l0_%=; \
	r0 = 0; \
	exit; \
l0_%=:	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

SEC("xdp")
__description("helper access to packet: test5, packet_ptr with too short range")
__failure __msg("invalid access to packet")
__naked void ptr_with_too_short_range_1(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[xdp_md_data]); \
	r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
	r2 += 1; \
	r4 = r2; \
	r4 += 7; \
	if r4 > r3 goto l0_%=; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test6, cls valid packet_ptr range")
__success __retval(0)
__naked void cls_valid_packet_ptr_range(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[__sk_buff_data]); \
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r1 = r2; \
	r1 += 8; \
	if r1 > r3 goto l0_%=; \
	r1 = %[map_hash_8b] ll; \
	r3 = r2; \
	r4 = 0; \
	call %[bpf_map_update_elem]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test7, cls unchecked packet_ptr")
__failure __msg("invalid access to packet")
__naked void test7_cls_unchecked_packet_ptr(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[__sk_buff_data]); \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test8, cls variable add")
__success __retval(0)
__naked void packet_test8_cls_variable_add(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[__sk_buff_data]); \
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r4 = r2; \
	r4 += 8; \
	if r4 > r3 goto l0_%=; \
	r5 = *(u8*)(r2 + 0); \
	r4 = r2; \
	r4 += r5; \
	r5 = r4; \
	r5 += 8; \
	if r5 > r3 goto l0_%=; \
	r1 = %[map_hash_8b] ll; \
	r2 = r4; \
	call %[bpf_map_lookup_elem]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test9, cls packet_ptr with bad range")
__failure __msg("invalid access to packet")
__naked void packet_ptr_with_bad_range_2(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[__sk_buff_data]); \
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r4 = r2; \
	r4 += 4; \
	if r4 > r3 goto l0_%=; \
	r0 = 0; \
	exit; \
l0_%=:	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test10, cls packet_ptr with too short range")
__failure __msg("invalid access to packet")
__naked void ptr_with_too_short_range_2(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + %[__sk_buff_data]); \
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r2 += 1; \
	r4 = r2; \
	r4 += 7; \
	if r4 > r3 goto l0_%=; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test11, cls unsuitable helper 1")
__failure __msg("helper access to the packet")
__naked void test11_cls_unsuitable_helper_1(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r3 = r6; \
	r3 += 7; \
	if r3 > r7 goto l0_%=; \
	r2 = 0; \
	r4 = 42; \
	r5 = 0; \
	call %[bpf_skb_store_bytes]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_skb_store_bytes),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test12, cls unsuitable helper 2")
__failure __msg("helper access to the packet")
__naked void test12_cls_unsuitable_helper_2(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r3 = r6; \
	r6 += 8; \
	if r6 > r7 goto l0_%=; \
	r2 = 0; \
	r4 = 4; \
	call %[bpf_skb_load_bytes]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test13, cls helper ok")
__success __retval(0)
__naked void packet_test13_cls_helper_ok(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r1 = r6; \
	r1 += 7; \
	if r1 > r7 goto l0_%=; \
	r1 = r6; \
	r2 = 4; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test14, cls helper ok sub")
__success __retval(0)
__naked void test14_cls_helper_ok_sub(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r1 = r6; \
	r1 += 7; \
	if r1 > r7 goto l0_%=; \
	r1 -= 4; \
	r2 = 4; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test15, cls helper fail sub")
__failure __msg("invalid access to packet")
__naked void test15_cls_helper_fail_sub(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r1 = r6; \
	r1 += 7; \
	if r1 > r7 goto l0_%=; \
	r1 -= 12; \
	r2 = 4; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test16, cls helper fail range 1")
__failure __msg("invalid access to packet")
__naked void cls_helper_fail_range_1(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r1 = r6; \
	r1 += 7; \
	if r1 > r7 goto l0_%=; \
	r1 = r6; \
	r2 = 8; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test17, cls helper fail range 2")
__failure __msg("R2 min value is negative")
__naked void cls_helper_fail_range_2(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r1 = r6; \
	r1 += 7; \
	if r1 > r7 goto l0_%=; \
	r1 = r6; \
	r2 = -9; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test18, cls helper fail range 3")
__failure __msg("R2 min value is negative")
__naked void cls_helper_fail_range_3(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r1 = r6; \
	r1 += 7; \
	if r1 > r7 goto l0_%=; \
	r1 = r6; \
	r2 = %[__imm_0]; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__imm_0, ~0),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test19, cls helper range zero")
__success __retval(0)
__naked void test19_cls_helper_range_zero(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r1 = r6; \
	r1 += 7; \
	if r1 > r7 goto l0_%=; \
	r1 = r6; \
	r2 = 0; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test20, pkt end as input")
__failure __msg("R1 type=pkt_end expected=fp")
__naked void test20_pkt_end_as_input(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r1 = r6; \
	r1 += 7; \
	if r1 > r7 goto l0_%=; \
	r1 = r7; \
	r2 = 4; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("helper access to packet: test21, wrong reg")
__failure __msg("invalid access to packet")
__naked void to_packet_test21_wrong_reg(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r6 += 1; \
	r1 = r6; \
	r1 += 7; \
	if r1 > r7 goto l0_%=; \
	r2 = 4; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
	r0 = 0; \
l0_%=:	exit; \
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";

279	tools/testing/selftests/bpf/progs/verifier_helper_restricted.c	Normal file
@ -0,0 +1,279 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_restricted.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct val {
	int cnt;
	struct bpf_spin_lock l;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct val);
} map_spin_lock SEC(".maps");

struct timer {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct timer);
} map_timer SEC(".maps");

SEC("kprobe")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void in_bpf_prog_type_kprobe_1(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_coarse_ns]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_coarse_ns)
	: __clobber_all);
}

SEC("tracepoint")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void in_bpf_prog_type_tracepoint_1(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_coarse_ns]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_coarse_ns)
	: __clobber_all);
}

SEC("perf_event")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void bpf_prog_type_perf_event_1(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_coarse_ns]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_coarse_ns)
	: __clobber_all);
}

SEC("raw_tracepoint")
__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
__failure __msg("unknown func bpf_ktime_get_coarse_ns")
__naked void bpf_prog_type_raw_tracepoint_1(void)
{
	asm volatile (" \
	call %[bpf_ktime_get_coarse_ns]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_ktime_get_coarse_ns)
	: __clobber_all);
}

SEC("kprobe")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_KPROBE")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void in_bpf_prog_type_kprobe_2(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_timer] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = %[map_timer] ll; \
	r3 = 1; \
l0_%=:	call %[bpf_timer_init]; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_timer_init),
	  __imm_addr(map_timer)
	: __clobber_all);
}
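
/* For contrast, a sketch of the same helper sequence from a program type
 * where timers are permitted (hypothetical names; clockid 1 is
 * CLOCK_MONOTONIC): */
SEC("tc")
int timer_init_ok_sketch(struct __sk_buff *skb)
{
	int key = 0;
	struct timer *val = bpf_map_lookup_elem(&map_timer, &key);

	if (val)
		bpf_timer_init(&val->t, &map_timer, 1 /* CLOCK_MONOTONIC */);
	return 0;
}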

SEC("perf_event")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void bpf_prog_type_perf_event_2(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_timer] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = %[map_timer] ll; \
	r3 = 1; \
l0_%=:	call %[bpf_timer_init]; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_timer_init),
	  __imm_addr(map_timer)
	: __clobber_all);
}

SEC("tracepoint")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void in_bpf_prog_type_tracepoint_2(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_timer] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = %[map_timer] ll; \
	r3 = 1; \
l0_%=:	call %[bpf_timer_init]; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_timer_init),
	  __imm_addr(map_timer)
	: __clobber_all);
}

SEC("raw_tracepoint")
__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_timer yet")
__naked void bpf_prog_type_raw_tracepoint_2(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_timer] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = %[map_timer] ll; \
	r3 = 1; \
l0_%=:	call %[bpf_timer_init]; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_timer_init),
	  __imm_addr(map_timer)
	: __clobber_all);
}

SEC("kprobe")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void in_bpf_prog_type_kprobe_3(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_spin_lock] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	call %[bpf_spin_lock]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

SEC("tracepoint")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void in_bpf_prog_type_tracepoint_3(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_spin_lock] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	call %[bpf_spin_lock]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

SEC("perf_event")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void bpf_prog_type_perf_event_3(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_spin_lock] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	call %[bpf_spin_lock]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

SEC("raw_tracepoint")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void bpf_prog_type_raw_tracepoint_3(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_spin_lock] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	call %[bpf_spin_lock]; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";

1245	tools/testing/selftests/bpf/progs/verifier_helper_value_access.c	Normal file
File diff suppressed because it is too large

157	tools/testing/selftests/bpf/progs/verifier_int_ptr.c	Normal file
@ -0,0 +1,157 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/int_ptr.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG uninitialized")
__failure __msg("invalid indirect read from stack R4 off -16+0 size 8")
__naked void arg_ptr_to_long_uninitialized(void)
{
	asm volatile (" \
	/* bpf_strtoul arg1 (buf) */ \
	r7 = r10; \
	r7 += -8; \
	r0 = 0x00303036; \
	*(u64*)(r7 + 0) = r0; \
	r1 = r7; \
	/* bpf_strtoul arg2 (buf_len) */ \
	r2 = 4; \
	/* bpf_strtoul arg3 (flags) */ \
	r3 = 0; \
	/* bpf_strtoul arg4 (res) */ \
	r7 += -8; \
	r4 = r7; \
	/* bpf_strtoul() */ \
	call %[bpf_strtoul]; \
	r0 = 1; \
	exit; \
"	:
	: __imm(bpf_strtoul)
	: __clobber_all);
}
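
/* The four asm arguments above map onto
 *   long bpf_strtoul(const char *buf, unsigned long buf_len, __u64 flags,
 *                    unsigned long *res);
 * and 0x00303036 is little-endian "600". 'res' is ARG_PTR_TO_LONG, so it
 * must point at an initialized 8-byte stack slot; a C-level sketch of the
 * passing variant (hypothetical prog name): */
SEC("cgroup/sysctl")
int strtoul_sketch(struct bpf_sysctl *ctx)
{
	char buf[8] = "600";
	unsigned long res = 0;	/* initialized, unlike the test above */

	bpf_strtoul(buf, 4, 0, &res);
	return 1;
}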

SEC("socket")
__description("ARG_PTR_TO_LONG half-uninitialized")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R4 off -16+4 size 8")
__retval(0)
__naked void ptr_to_long_half_uninitialized(void)
{
	asm volatile (" \
	/* bpf_strtoul arg1 (buf) */ \
	r7 = r10; \
	r7 += -8; \
	r0 = 0x00303036; \
	*(u64*)(r7 + 0) = r0; \
	r1 = r7; \
	/* bpf_strtoul arg2 (buf_len) */ \
	r2 = 4; \
	/* bpf_strtoul arg3 (flags) */ \
	r3 = 0; \
	/* bpf_strtoul arg4 (res) */ \
	r7 += -8; \
	*(u32*)(r7 + 0) = r0; \
	r4 = r7; \
	/* bpf_strtoul() */ \
	call %[bpf_strtoul]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_strtoul)
	: __clobber_all);
}

SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG misaligned")
__failure __msg("misaligned stack access off (0x0; 0x0)+-20+0 size 8")
__naked void arg_ptr_to_long_misaligned(void)
{
	asm volatile (" \
	/* bpf_strtoul arg1 (buf) */ \
	r7 = r10; \
	r7 += -8; \
	r0 = 0x00303036; \
	*(u64*)(r7 + 0) = r0; \
	r1 = r7; \
	/* bpf_strtoul arg2 (buf_len) */ \
	r2 = 4; \
	/* bpf_strtoul arg3 (flags) */ \
	r3 = 0; \
	/* bpf_strtoul arg4 (res) */ \
	r7 += -12; \
	r0 = 0; \
	*(u32*)(r7 + 0) = r0; \
	*(u64*)(r7 + 4) = r0; \
	r4 = r7; \
	/* bpf_strtoul() */ \
	call %[bpf_strtoul]; \
	r0 = 1; \
	exit; \
"	:
	: __imm(bpf_strtoul)
	: __clobber_all);
}

SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG size < sizeof(long)")
__failure __msg("invalid indirect access to stack R4 off=-4 size=8")
__naked void to_long_size_sizeof_long(void)
{
	asm volatile (" \
	/* bpf_strtoul arg1 (buf) */ \
	r7 = r10; \
	r7 += -16; \
	r0 = 0x00303036; \
	*(u64*)(r7 + 0) = r0; \
	r1 = r7; \
	/* bpf_strtoul arg2 (buf_len) */ \
	r2 = 4; \
	/* bpf_strtoul arg3 (flags) */ \
	r3 = 0; \
	/* bpf_strtoul arg4 (res) */ \
	r7 += 12; \
	*(u32*)(r7 + 0) = r0; \
	r4 = r7; \
	/* bpf_strtoul() */ \
	call %[bpf_strtoul]; \
	r0 = 1; \
	exit; \
"	:
	: __imm(bpf_strtoul)
	: __clobber_all);
}

SEC("cgroup/sysctl")
__description("ARG_PTR_TO_LONG initialized")
__success
__naked void arg_ptr_to_long_initialized(void)
{
	asm volatile (" \
	/* bpf_strtoul arg1 (buf) */ \
	r7 = r10; \
	r7 += -8; \
	r0 = 0x00303036; \
	*(u64*)(r7 + 0) = r0; \
	r1 = r7; \
	/* bpf_strtoul arg2 (buf_len) */ \
	r2 = 4; \
	/* bpf_strtoul arg3 (flags) */ \
	r3 = 0; \
	/* bpf_strtoul arg4 (res) */ \
	r7 += -8; \
	*(u64*)(r7 + 0) = r0; \
	r4 = r7; \
	/* bpf_strtoul() */ \
	call %[bpf_strtoul]; \
	r0 = 1; \
	exit; \
"	:
	: __imm(bpf_strtoul)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";

110	tools/testing/selftests/bpf/progs/verifier_ld_ind.c	Normal file
@ -0,0 +1,110 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ld_ind.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

SEC("socket")
__description("ld_ind: check calling conv, r1")
__failure __msg("R1 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r1(void)
{
	asm volatile (" \
	r6 = r1; \
	r1 = 1; \
	.8byte %[ld_ind]; \
	r0 = r1; \
	exit; \
"	:
	: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000))
	: __clobber_all);
}
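
/* Clang's BPF assembler has no mnemonic for the legacy LD_IND instruction,
 * so the test splices the raw encoding in with '.8byte %[ld_ind]'. The
 * BPF_LD_IND() macro comes from the in-tree include/linux/filter.h pulled
 * in above and expands, roughly, to:
 *
 *   ((struct bpf_insn) {
 *       .code  = BPF_LD | BPF_SIZE(BPF_W) | BPF_IND,
 *       .dst_reg = 0,
 *       .src_reg = BPF_REG_1,
 *       .off   = 0,
 *       .imm   = -0x200000 })
 *
 * __imm_insn() hands those 8 bytes to the .8byte directive verbatim. */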

SEC("socket")
__description("ld_ind: check calling conv, r2")
__failure __msg("R2 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r2(void)
{
	asm volatile (" \
	r6 = r1; \
	r2 = 1; \
	.8byte %[ld_ind]; \
	r0 = r2; \
	exit; \
"	:
	: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000))
	: __clobber_all);
}

SEC("socket")
__description("ld_ind: check calling conv, r3")
__failure __msg("R3 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r3(void)
{
	asm volatile (" \
	r6 = r1; \
	r3 = 1; \
	.8byte %[ld_ind]; \
	r0 = r3; \
	exit; \
"	:
	: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000))
	: __clobber_all);
}

SEC("socket")
__description("ld_ind: check calling conv, r4")
__failure __msg("R4 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r4(void)
{
	asm volatile (" \
	r6 = r1; \
	r4 = 1; \
	.8byte %[ld_ind]; \
	r0 = r4; \
	exit; \
"	:
	: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000))
	: __clobber_all);
}

SEC("socket")
__description("ld_ind: check calling conv, r5")
__failure __msg("R5 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r5(void)
{
	asm volatile (" \
	r6 = r1; \
	r5 = 1; \
	.8byte %[ld_ind]; \
	r0 = r5; \
	exit; \
"	:
	: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000))
	: __clobber_all);
}

SEC("socket")
__description("ld_ind: check calling conv, r7")
__success __success_unpriv __retval(1)
__naked void ind_check_calling_conv_r7(void)
{
	asm volatile (" \
	r6 = r1; \
	r7 = 1; \
	.8byte %[ld_ind]; \
	r0 = r7; \
	exit; \
"	:
	: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";

92	tools/testing/selftests/bpf/progs/verifier_leak_ptr.c	Normal file
@ -0,0 +1,92 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/leak_ptr.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("socket")
__description("leak pointer into ctx 1")
__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
__failure_unpriv __msg_unpriv("R2 leaks addr into mem")
__naked void leak_pointer_into_ctx_1(void)
{
	asm volatile (" \
	r0 = 0; \
	*(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \
	r2 = %[map_hash_8b] ll; \
	lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r2; \
	exit; \
"	:
	: __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
	: __clobber_all);
}
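
/* The 'lock *(u64 *)(...) += rN' line above is a BPF_ATOMIC add; at the C
 * level this instruction is what clang emits for __sync_fetch_and_add()
 * with an unused result, e.g. (sketch):
 *
 *   __sync_fetch_and_add((long *)&skb->cb[0], 1);
 *
 * What both checks reject is doing that with a map address into ctx
 * memory, which would leak a kernel pointer. */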

SEC("socket")
__description("leak pointer into ctx 2")
__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
__failure_unpriv __msg_unpriv("R10 leaks addr into mem")
__naked void leak_pointer_into_ctx_2(void)
{
	asm volatile (" \
	r0 = 0; \
	*(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \
	lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r10; \
	exit; \
"	:
	: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
	: __clobber_all);
}

SEC("socket")
__description("leak pointer into ctx 3")
__success __failure_unpriv __msg_unpriv("R2 leaks addr into ctx")
__retval(0)
__naked void leak_pointer_into_ctx_3(void)
{
	asm volatile (" \
	r0 = 0; \
	r2 = %[map_hash_8b] ll; \
	*(u64*)(r1 + %[__sk_buff_cb_0]) = r2; \
	exit; \
"	:
	: __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0]))
	: __clobber_all);
}

SEC("socket")
__description("leak pointer into map val")
__success __failure_unpriv __msg_unpriv("R6 leaks addr into mem")
__retval(0)
__naked void leak_pointer_into_map_val(void)
{
	asm volatile (" \
	r6 = r1; \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r3 = 0; \
	*(u64*)(r0 + 0) = r3; \
	lock *(u64 *)(r0 + 0) += r6; \
l0_%=:	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";

159	tools/testing/selftests/bpf/progs/verifier_map_ptr.c	Normal file
@ -0,0 +1,159 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/map_ptr.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
} map_array_48b SEC(".maps");

struct other_val {
	long long foo;
	long long bar;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct other_val);
} map_hash_16b SEC(".maps");

SEC("socket")
__description("bpf_map_ptr: read with negative offset rejected")
__failure __msg("R1 is bpf_array invalid negative access: off=-8")
__failure_unpriv
__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
__naked void read_with_negative_offset_rejected(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 = %[map_array_48b] ll; \
	r6 = *(u64*)(r1 - 8); \
	r0 = 1; \
	exit; \
"	:
	: __imm_addr(map_array_48b)
	: __clobber_all);
}

SEC("socket")
__description("bpf_map_ptr: write rejected")
__failure __msg("only read from bpf_array is supported")
__failure_unpriv
__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
__naked void bpf_map_ptr_write_rejected(void)
{
	asm volatile (" \
	r0 = 0; \
	*(u64*)(r10 - 8) = r0; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_array_48b] ll; \
	*(u64*)(r1 + 0) = r2; \
	r0 = 1; \
	exit; \
"	:
	: __imm_addr(map_array_48b)
	: __clobber_all);
}

SEC("socket")
__description("bpf_map_ptr: read non-existent field rejected")
__failure
__msg("cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4")
__failure_unpriv
__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void read_non_existent_field_rejected(void)
{
	asm volatile (" \
	r6 = 0; \
	r1 = %[map_array_48b] ll; \
	r6 = *(u32*)(r1 + 1); \
	r0 = 1; \
	exit; \
"	:
	: __imm_addr(map_array_48b)
	: __clobber_all);
}

SEC("socket")
__description("bpf_map_ptr: read ops field accepted")
__success __failure_unpriv
__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
__retval(1)
__naked void ptr_read_ops_field_accepted(void)
{
	asm volatile (" \
	r6 = 0; \
	r1 = %[map_array_48b] ll; \
	r6 = *(u64*)(r1 + 0); \
	r0 = 1; \
	exit; \
"	:
	: __imm_addr(map_array_48b)
	: __clobber_all);
}

SEC("socket")
__description("bpf_map_ptr: r = 0, map_ptr = map_ptr + r")
__success __failure_unpriv
__msg_unpriv("R1 has pointer with unsupported alu operation")
__retval(0)
__naked void map_ptr_map_ptr_r(void)
{
	asm volatile (" \
	r0 = 0; \
	*(u64*)(r10 - 8) = r0; \
	r2 = r10; \
	r2 += -8; \
	r0 = 0; \
	r1 = %[map_hash_16b] ll; \
	r1 += r0; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}

SEC("socket")
__description("bpf_map_ptr: r = 0, r = r + map_ptr")
__success __failure_unpriv
__msg_unpriv("R0 has pointer with unsupported alu operation")
__retval(0)
__naked void _0_r_r_map_ptr(void)
{
	asm volatile (" \
	r0 = 0; \
	*(u64*)(r10 - 8) = r0; \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	r0 = %[map_hash_16b] ll; \
	r1 += r0; \
	call %[bpf_map_lookup_elem]; \
	r0 = 0; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";

110	tools/testing/selftests/bpf/progs/verifier_map_ret_val.c	Normal file
@ -0,0 +1,110 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/map_ret_val.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("socket")
__description("invalid map_fd for function call")
__failure __msg("fd 0 is not pointing to valid bpf_map")
__failure_unpriv
__naked void map_fd_for_function_call(void)
{
	asm volatile (" \
	r2 = 0; \
	*(u64*)(r10 - 8) = r2; \
	r2 = r10; \
	r2 += -8; \
	.8byte %[ld_map_fd]; \
	.8byte 0; \
	call %[bpf_map_delete_elem]; \
	exit; \
"	:
	: __imm(bpf_map_delete_elem),
	  __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 0))
	: __clobber_all);
}

SEC("socket")
__description("don't check return value before access")
__failure __msg("R0 invalid mem access 'map_value_or_null'")
__failure_unpriv
__naked void check_return_value_before_access(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	r1 = 0; \
	*(u64*)(r0 + 0) = r1; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
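
/* C-level sketch of the missing check (hypothetical prog name): the lookup
 * result has type 'map_value_or_null' and must be tested before any use. */
SEC("socket")
int checked_lookup_sketch(void *ctx)
{
	long long key = 0;
	long long *val = bpf_map_lookup_elem(&map_hash_8b, &key);

	if (val)	/* promotes R0 from map_value_or_null to map_value */
		*val = 0;
	return 0;
}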

SEC("socket")
__description("access memory with incorrect alignment")
__failure __msg("misaligned value access")
__failure_unpriv
__flag(BPF_F_STRICT_ALIGNMENT)
__naked void access_memory_with_incorrect_alignment_1(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = 0; \
	*(u64*)(r0 + 4) = r1; \
l0_%=:	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("sometimes access memory with incorrect alignment")
__failure __msg("R0 invalid mem access")
__msg_unpriv("R0 leaks addr")
__flag(BPF_F_STRICT_ALIGNMENT)
__naked void access_memory_with_incorrect_alignment_2(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = 0; \
	*(u64*)(r0 + 0) = r1; \
	exit; \
l0_%=:	r1 = 1; \
	*(u64*)(r0 + 0) = r1; \
	exit; \
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";

410	tools/testing/selftests/bpf/progs/verifier_masking.c	Normal file
@ -0,0 +1,410 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/masking.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("masking, test out of bounds 1")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_1(void)
{
	asm volatile (" \
	w1 = 5; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 5 - 1)
	: __clobber_all);
}
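
/* The six ALU instructions above are the index-masking sequence used to
 * blind out-of-range offsets (Spectre v1 hardening). As scalar arithmetic,
 * with idx loaded into r1 and bound-1 fed in via __imm_0 (both well below
 * 2^63):
 *
 *   tmp  = (bound - 1) - idx;   // negative iff idx > bound - 1
 *   tmp |= idx;                 // keep the sign bit set for huge idx too
 *   tmp  = -tmp;                // > 0 iff the access was out of range
 *   mask = tmp s>> 63;          // arithmetic shift: all-ones or zero
 *   idx &= mask;                // out-of-range idx collapses to 0
 *
 * Here idx = 5 against bound 5 (so bound - 1 = 4): 5 > 4, the mask is 0,
 * and the program returns 0, which is what __retval(0) asserts. */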

SEC("socket")
__description("masking, test out of bounds 2")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_2(void)
{
	asm volatile (" \
	w1 = 1; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 1 - 1)
	: __clobber_all);
}

SEC("socket")
__description("masking, test out of bounds 3")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_3(void)
{
	asm volatile (" \
	w1 = 0xffffffff; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 0xffffffff - 1)
	: __clobber_all);
}

SEC("socket")
__description("masking, test out of bounds 4")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_4(void)
{
	asm volatile (" \
	w1 = 0xffffffff; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 1 - 1)
	: __clobber_all);
}

SEC("socket")
__description("masking, test out of bounds 5")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_5(void)
{
	asm volatile (" \
	w1 = -1; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 1 - 1)
	: __clobber_all);
}

SEC("socket")
__description("masking, test out of bounds 6")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_6(void)
{
	asm volatile (" \
	w1 = -1; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 0xffffffff - 1)
	: __clobber_all);
}

SEC("socket")
__description("masking, test out of bounds 7")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_7(void)
{
	asm volatile (" \
	r1 = 5; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 5 - 1)
	: __clobber_all);
}

SEC("socket")
__description("masking, test out of bounds 8")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_8(void)
{
	asm volatile (" \
	r1 = 1; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 1 - 1)
	: __clobber_all);
}

SEC("socket")
__description("masking, test out of bounds 9")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_9(void)
{
	asm volatile (" \
	r1 = 0xffffffff; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 0xffffffff - 1)
	: __clobber_all);
}

SEC("socket")
__description("masking, test out of bounds 10")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_10(void)
{
	asm volatile (" \
	r1 = 0xffffffff; \
	w2 = %[__imm_0]; \
	r2 -= r1; \
	r2 |= r1; \
	r2 = -r2; \
	r2 s>>= 63; \
	r1 &= r2; \
	r0 = r1; \
	exit; \
"	:
	: __imm_const(__imm_0, 1 - 1)
	: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test out of bounds 11")
|
||||
__success __success_unpriv __retval(0)
|
||||
__naked void test_out_of_bounds_11(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = -1; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r1; \
|
||||
r2 |= r1; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r1 &= r2; \
|
||||
r0 = r1; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 1 - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test out of bounds 12")
|
||||
__success __success_unpriv __retval(0)
|
||||
__naked void test_out_of_bounds_12(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = -1; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r1; \
|
||||
r2 |= r1; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r1 &= r2; \
|
||||
r0 = r1; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 0xffffffff - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test in bounds 1")
|
||||
__success __success_unpriv __retval(4)
|
||||
__naked void masking_test_in_bounds_1(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
w1 = 4; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r1; \
|
||||
r2 |= r1; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r1 &= r2; \
|
||||
r0 = r1; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 5 - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test in bounds 2")
|
||||
__success __success_unpriv __retval(0)
|
||||
__naked void masking_test_in_bounds_2(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
w1 = 0; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r1; \
|
||||
r2 |= r1; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r1 &= r2; \
|
||||
r0 = r1; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 0xffffffff - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test in bounds 3")
|
||||
__success __success_unpriv __retval(0xfffffffe)
|
||||
__naked void masking_test_in_bounds_3(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
w1 = 0xfffffffe; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r1; \
|
||||
r2 |= r1; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r1 &= r2; \
|
||||
r0 = r1; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 0xffffffff - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test in bounds 4")
|
||||
__success __success_unpriv __retval(0xabcde)
|
||||
__naked void masking_test_in_bounds_4(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
w1 = 0xabcde; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r1; \
|
||||
r2 |= r1; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r1 &= r2; \
|
||||
r0 = r1; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 0xabcdef - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test in bounds 5")
|
||||
__success __success_unpriv __retval(0)
|
||||
__naked void masking_test_in_bounds_5(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
w1 = 0; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r1; \
|
||||
r2 |= r1; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r1 &= r2; \
|
||||
r0 = r1; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 1 - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test in bounds 6")
|
||||
__success __success_unpriv __retval(46)
|
||||
__naked void masking_test_in_bounds_6(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
w1 = 46; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r1; \
|
||||
r2 |= r1; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r1 &= r2; \
|
||||
r0 = r1; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 47 - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test in bounds 7")
|
||||
__success __success_unpriv __retval(46)
|
||||
__naked void masking_test_in_bounds_7(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r3 = -46; \
|
||||
r3 *= -1; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r3; \
|
||||
r2 |= r3; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r3 &= r2; \
|
||||
r0 = r3; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 47 - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("masking, test in bounds 8")
|
||||
__success __success_unpriv __retval(0)
|
||||
__naked void masking_test_in_bounds_8(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r3 = -47; \
|
||||
r3 *= -1; \
|
||||
w2 = %[__imm_0]; \
|
||||
r2 -= r3; \
|
||||
r2 |= r3; \
|
||||
r2 = -r2; \
|
||||
r2 s>>= 63; \
|
||||
r3 &= r2; \
|
||||
r0 = r3; \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, 47 - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
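All twenty tests above exercise the same branchless bounds-masking idiom that the verifier's speculative-execution mitigation relies on. A minimal C sketch of that idiom, assuming 32-bit inputs zero-extended to 64 bits (illustrative only, not part of this series):

#include <stdint.h>

/* Returns val if 0 <= val <= bound - 1, else 0, with no conditional
 * branch; mirrors the w1/w2/r2 instruction sequence in the tests. */
static uint64_t mask_bound(uint64_t val, uint64_t bound)
{
	uint64_t m = (bound - 1) - val;   /* MSB set iff val > bound - 1 */

	m |= val;                         /* also reject val with MSB set */
	m = -m;                           /* MSB set iff val in range and non-zero */
	m = (uint64_t)((int64_t)m >> 63); /* broadcast MSB: all-ones or all-zeros */
	return val & m;                   /* in-range val passes, anything else is 0 */
}

With val = 4, bound = 5 this yields 4 ("in bounds 1"); with val = 5, bound = 5 it yields 0 ("out of bounds 1").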
284	tools/testing/selftests/bpf/progs/verifier_meta_access.c	Normal file
@@ -0,0 +1,284 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/meta_access.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("xdp")
__description("meta access, test1")
__success __retval(0)
__naked void meta_access_test1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test2")
__failure __msg("invalid access to packet, off=-8")
__naked void meta_access_test2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r2; \
r0 -= 8; \
r4 = r2; \
r4 += 8; \
if r4 > r3 goto l0_%=; \
r0 = *(u8*)(r0 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test3")
__failure __msg("invalid access to packet")
__naked void meta_access_test3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test4")
__failure __msg("invalid access to packet")
__naked void meta_access_test4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r4 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r4; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test5")
__failure __msg("R3 !read_ok")
__naked void meta_access_test5(void)
{
asm volatile (" \
r3 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r4 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r3; \
r0 += 8; \
if r0 > r4 goto l0_%=; \
r2 = -8; \
call %[bpf_xdp_adjust_meta]; \
r0 = *(u8*)(r3 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_xdp_adjust_meta),
__imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test6")
__failure __msg("invalid access to packet")
__naked void meta_access_test6(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r3; \
r0 += 8; \
r4 = r2; \
r4 += 8; \
if r4 > r0 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test7")
__success __retval(0)
__naked void meta_access_test7(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r0 = r3; \
r0 += 8; \
r4 = r2; \
r4 += 8; \
if r4 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test8")
__success __retval(0)
__naked void meta_access_test8(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r4 = r2; \
r4 += 0xFFFF; \
if r4 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test9")
__failure __msg("invalid access to packet")
__naked void meta_access_test9(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r4 = r2; \
r4 += 0xFFFF; \
r4 += 1; \
if r4 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test10")
__failure __msg("invalid access to packet")
__naked void meta_access_test10(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r4 = *(u32*)(r1 + %[xdp_md_data_end]); \
r5 = 42; \
r6 = 24; \
*(u64*)(r10 - 8) = r5; \
lock *(u64 *)(r10 - 8) += r6; \
r5 = *(u64*)(r10 - 8); \
if r5 > 100 goto l0_%=; \
r3 += r5; \
r5 = r3; \
r6 = r2; \
r6 += 8; \
if r6 > r5 goto l0_%=; \
r2 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test11")
__success __retval(0)
__naked void meta_access_test11(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r5 = 42; \
r6 = 24; \
*(u64*)(r10 - 8) = r5; \
lock *(u64 *)(r10 - 8) += r6; \
r5 = *(u64*)(r10 - 8); \
if r5 > 100 goto l0_%=; \
r2 += r5; \
r5 = r2; \
r6 = r2; \
r6 += 8; \
if r6 > r3 goto l0_%=; \
r5 = *(u8*)(r5 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

SEC("xdp")
__description("meta access, test12")
__success __retval(0)
__naked void meta_access_test12(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r4 = *(u32*)(r1 + %[xdp_md_data_end]); \
r5 = r3; \
r5 += 16; \
if r5 > r4 goto l0_%=; \
r0 = *(u8*)(r3 + 0); \
r5 = r2; \
r5 += 16; \
if r5 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}

char _license[] SEC("license") = "GPL";
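These tests pin down which comparisons between data_meta, data and data_end the verifier accepts as bounds proofs. A minimal C counterpart of the valid pattern from test1 (a sketch; the bpf_xdp_adjust_meta() setup needed to actually populate metadata is omitted):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int meta_read(struct xdp_md *ctx)
{
	void *meta = (void *)(long)ctx->data_meta;
	void *data = (void *)(long)ctx->data;

	/* metadata lives in [data_meta, data): checking against
	 * data, not data_end, is what makes this access valid */
	if (meta + 8 > data)
		return XDP_PASS;
	return *(__u8 *)meta ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";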
371	tools/testing/selftests/bpf/progs/verifier_raw_stack.c	Normal file
@@ -0,0 +1,371 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("tc")
__description("raw_stack: no skb_load_bytes")
__failure __msg("invalid read from stack R6 off=-8 size=8")
__naked void stack_no_skb_load_bytes(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = 8; \
/* Call to skb_load_bytes() omitted. */ \
r0 = *(u64*)(r6 + 0); \
exit; \
" ::: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, negative len")
__failure __msg("R4 min value is negative")
__naked void skb_load_bytes_negative_len(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = -8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, negative len 2")
__failure __msg("R4 min value is negative")
__naked void load_bytes_negative_len_2(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = %[__imm_0]; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__imm_0, ~0)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, zero len")
__failure __msg("invalid zero-sized read")
__naked void skb_load_bytes_zero_len(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = 0; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, no init")
__success __retval(0)
__naked void skb_load_bytes_no_init(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, init")
__success __retval(0)
__naked void stack_skb_load_bytes_init(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = 0xcafe; \
*(u64*)(r6 + 0) = r3; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs around bounds")
__success __retval(0)
__naked void bytes_spilled_regs_around_bounds(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -16; \
*(u64*)(r6 - 8) = r1; \
*(u64*)(r6 + 8) = r1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 - 8); \
r2 = *(u64*)(r6 + 8); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
r0 += r2; \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption")
__failure __msg("R0 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void load_bytes_spilled_regs_corruption(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
*(u64*)(r6 + 0) = r1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption 2")
__failure __msg("R3 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bytes_spilled_regs_corruption_2(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -16; \
*(u64*)(r6 - 8) = r1; \
*(u64*)(r6 + 0) = r1; \
*(u64*)(r6 + 8) = r1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 - 8); \
r2 = *(u64*)(r6 + 8); \
r3 = *(u64*)(r6 + 0); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
r0 += r2; \
r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]); \
r0 += r3; \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
__imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs + data")
__success __retval(0)
__naked void load_bytes_spilled_regs_data(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -16; \
*(u64*)(r6 - 8) = r1; \
*(u64*)(r6 + 0) = r1; \
*(u64*)(r6 + 8) = r1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 - 8); \
r2 = *(u64*)(r6 + 8); \
r3 = *(u64*)(r6 + 0); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
r0 += r2; \
r0 += r3; \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 1")
__failure __msg("invalid indirect access to stack R3 off=-513 size=8")
__naked void load_bytes_invalid_access_1(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -513; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 2")
__failure __msg("invalid indirect access to stack R3 off=-1 size=8")
__naked void load_bytes_invalid_access_2(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 3")
__failure __msg("R4 min value is negative")
__naked void load_bytes_invalid_access_3(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += 0xffffffff; \
r3 = r6; \
r4 = 0xffffffff; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 4")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_4(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -1; \
r3 = r6; \
r4 = 0x7fffffff; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 5")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_5(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -512; \
r3 = r6; \
r4 = 0x7fffffff; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 6")
__failure __msg("invalid zero-sized read")
__naked void load_bytes_invalid_access_6(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -512; \
r3 = r6; \
r4 = 0; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, large access")
__success __retval(0)
__naked void skb_load_bytes_large_access(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -512; \
r3 = r6; \
r4 = 512; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}

char _license[] SEC("license") = "GPL";
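The common shape in this file is a stack buffer handed to bpf_skb_load_bytes() as raw, uninitialized memory, with the helper's stack offset and length both needing provable bounds. Roughly this in C (a sketch, assuming a classifier context):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int load_bytes(struct __sk_buff *skb)
{
	__u8 buf[8];

	/* the helper fully writes buf, so no prior init is needed;
	 * both the stack offset and the length must be valid */
	if (bpf_skb_load_bytes(skb, 4, buf, sizeof(buf)) < 0)
		return 0;
	return buf[0];
}

char _license[] SEC("license") = "GPL";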
50	tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c	Normal file
@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/raw_tp_writable.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("raw_tracepoint.w")
__description("raw_tracepoint_writable: reject variable offset")
__failure
__msg("R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void tracepoint_writable_reject_variable_offset(void)
{
asm volatile (" \
/* r6 is our tp buffer */ \
r6 = *(u64*)(r1 + 0); \
r1 = %[map_hash_8b] ll; \
/* move the key (== 0) to r10-8 */ \
w0 = 0; \
r2 = r10; \
r2 += -8; \
*(u64*)(r2 + 0) = r0; \
/* lookup in the map */ \
call %[bpf_map_lookup_elem]; \
/* exit clean if null */ \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: /* shift the buffer pointer to a variable location */\
r0 = *(u32*)(r0 + 0); \
r6 += r0; \
/* clobber whatever's there */ \
r7 = 4242; \
*(u64*)(r6 + 0) = r7; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

char _license[] SEC("license") = "GPL";
131	tools/testing/selftests/bpf/progs/verifier_ringbuf.c	Normal file
@@ -0,0 +1,131 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ringbuf.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

SEC("socket")
__description("ringbuf: invalid reservation offset 1")
__failure __msg("R1 must have zero offset when passed to release func")
__failure_unpriv
__naked void ringbuf_invalid_reservation_offset_1(void)
{
asm volatile (" \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
/* store a pointer to the reserved memory in R6 */\
r6 = r0; \
/* check whether the reservation was successful */\
if r0 == 0 goto l0_%=; \
/* spill R6(mem) into the stack */ \
*(u64*)(r10 - 8) = r6; \
/* fill it back in R7 */ \
r7 = *(u64*)(r10 - 8); \
/* should be able to access *(R7) = 0 */ \
r1 = 0; \
*(u64*)(r7 + 0) = r1; \
/* submit the reserved ringbuf memory */ \
r1 = r7; \
/* add invalid offset to reserved ringbuf memory */\
r1 += 0xcafe; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}

SEC("socket")
__description("ringbuf: invalid reservation offset 2")
__failure __msg("R7 min value is outside of the allowed memory range")
__failure_unpriv
__naked void ringbuf_invalid_reservation_offset_2(void)
{
asm volatile (" \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
/* store a pointer to the reserved memory in R6 */\
r6 = r0; \
/* check whether the reservation was successful */\
if r0 == 0 goto l0_%=; \
/* spill R6(mem) into the stack */ \
*(u64*)(r10 - 8) = r6; \
/* fill it back in R7 */ \
r7 = *(u64*)(r10 - 8); \
/* add invalid offset to reserved ringbuf memory */\
r7 += 0xcafe; \
/* should be able to access *(R7) = 0 */ \
r1 = 0; \
*(u64*)(r7 + 0) = r1; \
/* submit the reserved ringbuf memory */ \
r1 = r7; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}

SEC("xdp")
__description("ringbuf: check passing rb mem to helpers")
__success __retval(0)
__naked void passing_rb_mem_to_helpers(void)
{
asm volatile (" \
r6 = r1; \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
r7 = r0; \
/* check whether the reservation was successful */\
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: /* pass allocated ring buffer memory to fib lookup */\
r1 = r6; \
r2 = r0; \
r3 = 8; \
r4 = 0; \
call %[bpf_fib_lookup]; \
/* submit the ringbuf memory */ \
r1 = r7; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_fib_lookup),
__imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}

char _license[] SEC("license") = "GPL";
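The reserve/submit contract these tests enforce is easier to see in C: bpf_ringbuf_submit() must receive the pointer exactly as bpf_ringbuf_reserve() returned it, with no offset applied. A sketch mirroring the map declared above:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

SEC("socket")
int rb_submit(void *ctx)
{
	__u64 *rec = bpf_ringbuf_reserve(&ringbuf, sizeof(*rec), 0);

	if (!rec)
		return 0;
	*rec = 0;
	/* any arithmetic on rec before this call is rejected */
	bpf_ringbuf_submit(rec, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";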
374	tools/testing/selftests/bpf/progs/verifier_spill_fill.c	Normal file
@@ -0,0 +1,374 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

SEC("socket")
__description("check valid spill/fill")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void check_valid_spill_fill(void)
{
asm volatile (" \
/* spill R1(ctx) into stack */ \
*(u64*)(r10 - 8) = r1; \
/* fill it back into R2 */ \
r2 = *(u64*)(r10 - 8); \
/* should be able to access R0 = *(R2 + 8) */ \
/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\
r0 = r2; \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("check valid spill/fill, skb mark")
__success __success_unpriv __retval(0)
__naked void valid_spill_fill_skb_mark(void)
{
asm volatile (" \
r6 = r1; \
*(u64*)(r10 - 8) = r6; \
r0 = *(u64*)(r10 - 8); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
exit; \
" :
: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}

SEC("socket")
__description("check valid spill/fill, ptr to mem")
__success __success_unpriv __retval(0)
__naked void spill_fill_ptr_to_mem(void)
{
asm volatile (" \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
/* store a pointer to the reserved memory in R6 */\
r6 = r0; \
/* check whether the reservation was successful */\
if r0 == 0 goto l0_%=; \
/* spill R6(mem) into the stack */ \
*(u64*)(r10 - 8) = r6; \
/* fill it back in R7 */ \
r7 = *(u64*)(r10 - 8); \
/* should be able to access *(R7) = 0 */ \
r1 = 0; \
*(u64*)(r7 + 0) = r1; \
/* submit the reserved ringbuf memory */ \
r1 = r7; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}

SEC("socket")
__description("check with invalid reg offset 0")
__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited")
__failure_unpriv
__naked void with_invalid_reg_offset_0(void)
{
asm volatile (" \
/* reserve 8 byte ringbuf memory */ \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
/* store a pointer to the reserved memory in R6 */\
r6 = r0; \
/* add invalid offset to memory or NULL */ \
r0 += 1; \
/* check whether the reservation was successful */\
if r0 == 0 goto l0_%=; \
/* should not be able to access *(R7) = 0 */ \
r1 = 0; \
*(u32*)(r6 + 0) = r1; \
/* submit the reserved ringbuf memory */ \
r1 = r6; \
r2 = 0; \
call %[bpf_ringbuf_submit]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_ringbuf_reserve),
__imm(bpf_ringbuf_submit),
__imm_addr(map_ringbuf)
: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill")
__failure __msg("R0 invalid mem access 'scalar'")
__msg_unpriv("attempt to corrupt spilled")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_corrupted_spill_fill(void)
{
asm volatile (" \
/* spill R1(ctx) into stack */ \
*(u64*)(r10 - 8) = r1; \
/* mess up with R1 pointer on stack */ \
r0 = 0x23; \
*(u8*)(r10 - 7) = r0; \
/* fill back into R0 is fine for priv. \
* R0 now becomes SCALAR_VALUE. \
*/ \
r0 = *(u64*)(r10 - 8); \
/* Load from R0 should fail. */ \
r0 = *(u64*)(r0 + 8); \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill, LSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_lsb(void)
{
asm volatile (" \
*(u64*)(r10 - 8) = r1; \
r0 = 0xcafe; \
*(u16*)(r10 - 8) = r0; \
r0 = *(u64*)(r10 - 8); \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill, MSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_msb(void)
{
asm volatile (" \
*(u64*)(r10 - 8) = r1; \
r0 = 0x12345678; \
*(u32*)(r10 - 4) = r0; \
r0 = *(u64*)(r10 - 8); \
exit; \
" ::: __clobber_all);
}

SEC("tc")
__description("Spill and refill a u32 const scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
r4 = *(u32*)(r10 - 8); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */ \
r0 += r4; \
/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}

SEC("socket")
__description("Spill a u32 const, refill from another half of the uninit u32 from the stack")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -4+0 size 4")
__retval(0)
__naked void uninit_u32_from_the_stack(void)
{
asm volatile (" \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/ \
r4 = *(u32*)(r10 - 4); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}

SEC("tc")
__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u16_offset_to_skb_data(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
r4 = *(u16*)(r10 - 8); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
r0 += r4; \
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}

SEC("tc")
__description("Spill u32 const scalars. Refill as u64. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u64_offset_to_skb_data(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w6 = 0; \
w7 = 20; \
*(u32*)(r10 - 4) = r6; \
*(u32*)(r10 - 8) = r7; \
r4 = *(u16*)(r10 - 8); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
r0 += r4; \
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}

SEC("tc")
__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void _6_offset_to_skb_data(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
r4 = *(u16*)(r10 - 6); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
r0 += r4; \
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}

SEC("tc")
__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void addr_offset_to_skb_data(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
*(u32*)(r10 - 4) = r4; \
r4 = *(u32*)(r10 - 4); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\
r0 += r4; \
/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
if r0 > r3 goto l0_%=; \
/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}

SEC("tc")
__description("Spill and refill a umax=40 bounded scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r4 = *(u64*)(r1 + %[__sk_buff_tstamp]); \
if r4 <= 40 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: /* *(u32 *)(r10 -8) = r4 R4=umax=40 */ \
*(u32*)(r10 - 8) = r4; \
/* r4 = (*u32 *)(r10 - 8) */ \
r4 = *(u32*)(r10 - 8); \
/* r2 += r4 R2=pkt R4=umax=40 */ \
r2 += r4; \
/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */ \
r0 = r2; \
/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ \
r2 += 20; \
/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\
if r2 > r3 goto l1_%=; \
/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\
r0 = *(u32*)(r0 + 0); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
: __clobber_all);
}

SEC("tc")
__description("Spill a u32 scalar at fp-4 and then at fp-8")
__success __retval(0)
__naked void and_then_at_fp_8(void)
{
asm volatile (" \
w4 = 4321; \
*(u32*)(r10 - 4) = r4; \
*(u32*)(r10 - 8) = r4; \
r4 = *(u64*)(r10 - 8); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
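The umax=40 test above corresponds to a pattern that is natural in C, where the verifier must carry a scalar's bounds through a spill and fill of the same width. A sketch (tstamp is only used here as a convenient unbounded source, matching the test):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int bounded_offset(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	__u64 off = skb->tstamp;

	if (off > 40)
		return 0;
	/* off now has umax=40; the bound survives a full-width
	 * spill/fill, but not a narrower refill (see the u16 tests) */
	if (data + off + 20 > data_end)
		return 0;
	return *(__u8 *)(data + off);
}

char _license[] SEC("license") = "GPL";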
484	tools/testing/selftests/bpf/progs/verifier_stack_ptr.c	Normal file
@@ -0,0 +1,484 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Converted from tools/testing/selftests/bpf/verifier/stack_ptr.c */
|
||||
|
||||
#include <linux/bpf.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <limits.h>
|
||||
#include "bpf_misc.h"
|
||||
|
||||
#define MAX_ENTRIES 11
|
||||
|
||||
struct test_val {
|
||||
unsigned int index;
|
||||
int foo[MAX_ENTRIES];
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, int);
|
||||
__type(value, struct test_val);
|
||||
} map_array_48b SEC(".maps");
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK store/load")
|
||||
__success __success_unpriv __retval(0xfaceb00c)
|
||||
__naked void ptr_to_stack_store_load(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -10; \
|
||||
r0 = 0xfaceb00c; \
|
||||
*(u64*)(r1 + 2) = r0; \
|
||||
r0 = *(u64*)(r1 + 2); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK store/load - bad alignment on off")
|
||||
__failure __msg("misaligned stack access off (0x0; 0x0)+-8+2 size 8")
|
||||
__failure_unpriv
|
||||
__naked void load_bad_alignment_on_off(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -8; \
|
||||
r0 = 0xfaceb00c; \
|
||||
*(u64*)(r1 + 2) = r0; \
|
||||
r0 = *(u64*)(r1 + 2); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK store/load - bad alignment on reg")
|
||||
__failure __msg("misaligned stack access off (0x0; 0x0)+-10+8 size 8")
|
||||
__failure_unpriv
|
||||
__naked void load_bad_alignment_on_reg(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -10; \
|
||||
r0 = 0xfaceb00c; \
|
||||
*(u64*)(r1 + 8) = r0; \
|
||||
r0 = *(u64*)(r1 + 8); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK store/load - out of bounds low")
|
||||
__failure __msg("invalid write to stack R1 off=-79992 size=8")
|
||||
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
|
||||
__naked void load_out_of_bounds_low(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -80000; \
|
||||
r0 = 0xfaceb00c; \
|
||||
*(u64*)(r1 + 8) = r0; \
|
||||
r0 = *(u64*)(r1 + 8); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK store/load - out of bounds high")
|
||||
__failure __msg("invalid write to stack R1 off=0 size=8")
|
||||
__failure_unpriv
|
||||
__naked void load_out_of_bounds_high(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -8; \
|
||||
r0 = 0xfaceb00c; \
|
||||
*(u64*)(r1 + 8) = r0; \
|
||||
r0 = *(u64*)(r1 + 8); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK check high 1")
|
||||
__success __success_unpriv __retval(42)
|
||||
__naked void to_stack_check_high_1(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -1; \
|
||||
r0 = 42; \
|
||||
*(u8*)(r1 + 0) = r0; \
|
||||
r0 = *(u8*)(r1 + 0); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK check high 2")
|
||||
__success __success_unpriv __retval(42)
|
||||
__naked void to_stack_check_high_2(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r0 = 42; \
|
||||
*(u8*)(r1 - 1) = r0; \
|
||||
r0 = *(u8*)(r1 - 1); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK check high 3")
|
||||
__success __failure_unpriv
|
||||
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
|
||||
__retval(42)
|
||||
__naked void to_stack_check_high_3(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += 0; \
|
||||
r0 = 42; \
|
||||
*(u8*)(r1 - 1) = r0; \
|
||||
r0 = *(u8*)(r1 - 1); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK check high 4")
|
||||
__failure __msg("invalid write to stack R1 off=0 size=1")
|
||||
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
|
||||
__naked void to_stack_check_high_4(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += 0; \
|
||||
r0 = 42; \
|
||||
*(u8*)(r1 + 0) = r0; \
|
||||
r0 = *(u8*)(r1 + 0); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK check high 5")
|
||||
__failure __msg("invalid write to stack R1")
|
||||
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
|
||||
__naked void to_stack_check_high_5(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += %[__imm_0]; \
|
||||
r0 = 42; \
|
||||
*(u8*)(r1 + 0) = r0; \
|
||||
r0 = *(u8*)(r1 + 0); \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, (1 << 29) - 1)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK check high 6")
|
||||
__failure __msg("invalid write to stack")
|
||||
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
|
||||
__naked void to_stack_check_high_6(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += %[__imm_0]; \
|
||||
r0 = 42; \
|
||||
*(u8*)(r1 + %[shrt_max]) = r0; \
|
||||
r0 = *(u8*)(r1 + %[shrt_max]); \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, (1 << 29) - 1),
|
||||
__imm_const(shrt_max, SHRT_MAX)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK check high 7")
|
||||
__failure __msg("fp pointer offset")
|
||||
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
|
||||
__naked void to_stack_check_high_7(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += %[__imm_0]; \
|
||||
r1 += %[__imm_0]; \
|
||||
r0 = 42; \
|
||||
*(u8*)(r1 + %[shrt_max]) = r0; \
|
||||
r0 = *(u8*)(r1 + %[shrt_max]); \
|
||||
exit; \
|
||||
" :
|
||||
: __imm_const(__imm_0, (1 << 29) - 1),
|
||||
__imm_const(shrt_max, SHRT_MAX)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK check low 1")
|
||||
__success __success_unpriv __retval(42)
|
||||
__naked void to_stack_check_low_1(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -512; \
|
||||
r0 = 42; \
|
||||
*(u8*)(r1 + 0) = r0; \
|
||||
r0 = *(u8*)(r1 + 0); \
|
||||
exit; \
|
||||
" ::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("socket")
|
||||
__description("PTR_TO_STACK check low 2")
|
||||
__success __failure_unpriv
|
||||
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
|
||||
__retval(42)
|
||||
__naked void to_stack_check_low_2(void)
|
||||
{
|
||||
asm volatile (" \
|
||||
r1 = r10; \
|
||||
r1 += -513; \
|
||||
r0 = 42; \
|
||||
*(u8*)(r1 + 1) = r0; \
|
||||
r0 = *(u8*)(r1 + 1); \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK check low 3")
__failure __msg("invalid write to stack R1 off=-513 size=1")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_low_3(void)
{
asm volatile (" \
r1 = r10; \
r1 += -513; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK check low 4")
__failure __msg("math between fp pointer")
__failure_unpriv
__naked void to_stack_check_low_4(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[int_min]; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK check low 5")
__failure __msg("invalid write to stack")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_low_5(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[__imm_0]; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" :
: __imm_const(__imm_0, -((1 << 29) - 1))
: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK check low 6")
__failure __msg("invalid write to stack")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_low_6(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[__imm_0]; \
r0 = 42; \
*(u8*)(r1 %[shrt_min]) = r0; \
r0 = *(u8*)(r1 %[shrt_min]); \
exit; \
" :
: __imm_const(__imm_0, -((1 << 29) - 1)),
__imm_const(shrt_min, SHRT_MIN)
: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK check low 7")
__failure __msg("fp pointer offset")
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__naked void to_stack_check_low_7(void)
{
asm volatile (" \
r1 = r10; \
r1 += %[__imm_0]; \
r1 += %[__imm_0]; \
r0 = 42; \
*(u8*)(r1 %[shrt_min]) = r0; \
r0 = *(u8*)(r1 %[shrt_min]); \
exit; \
" :
: __imm_const(__imm_0, -((1 << 29) - 1)),
__imm_const(shrt_min, SHRT_MIN)
: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK mixed reg/k, 1")
__success __success_unpriv __retval(42)
__naked void stack_mixed_reg_k_1(void)
{
asm volatile (" \
r1 = r10; \
r1 += -3; \
r2 = -3; \
r1 += r2; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK mixed reg/k, 2")
__success __success_unpriv __retval(42)
__naked void stack_mixed_reg_k_2(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r10 - 8) = r0; \
r0 = 0; \
*(u64*)(r10 - 16) = r0; \
r1 = r10; \
r1 += -3; \
r2 = -3; \
r1 += r2; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r5 = r10; \
r0 = *(u8*)(r5 - 6); \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK mixed reg/k, 3")
__success __success_unpriv __retval(-3)
__naked void stack_mixed_reg_k_3(void)
{
asm volatile (" \
r1 = r10; \
r1 += -3; \
r2 = -3; \
r1 += r2; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = r2; \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK reg")
__success __success_unpriv __retval(42)
__naked void ptr_to_stack_reg(void)
{
asm volatile (" \
r1 = r10; \
r2 = -3; \
r1 += r2; \
r0 = 42; \
*(u8*)(r1 + 0) = r0; \
r0 = *(u8*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("stack pointer arithmetic")
__success __success_unpriv __retval(0)
__naked void stack_pointer_arithmetic(void)
{
asm volatile (" \
r1 = 4; \
goto l0_%=; \
l0_%=: r7 = r10; \
r7 += -10; \
r7 += -10; \
r2 = r7; \
r2 += r1; \
r0 = 0; \
*(u32*)(r2 + 4) = r0; \
r2 = r7; \
r2 += 8; \
r0 = 0; \
*(u32*)(r2 + 4) = r0; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}

SEC("tc")
__description("store PTR_TO_STACK in R10 to array map using BPF_B")
__success __retval(42)
__naked void array_map_using_bpf_b(void)
{
asm volatile (" \
/* Load pointer to map. */ \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
r0 = 2; \
exit; \
l0_%=: r1 = r0; \
/* Copy R10 to R9. */ \
r9 = r10; \
/* Pollute other registers with unaligned values. */\
r2 = -1; \
r3 = -1; \
r4 = -1; \
r5 = -1; \
r6 = -1; \
r7 = -1; \
r8 = -1; \
/* Store both R9 and R10 with BPF_B and read back. */\
*(u8*)(r1 + 0) = r10; \
r2 = *(u8*)(r1 + 0); \
*(u8*)(r1 + 0) = r9; \
r3 = *(u8*)(r1 + 0); \
/* Should read back as same value. */ \
if r2 == r3 goto l1_%=; \
r0 = 1; \
exit; \
l1_%=: r0 = 42; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b)
: __clobber_all);
}

char _license[] SEC("license") = "GPL";

61 tools/testing/selftests/bpf/progs/verifier_uninit.c Normal file
@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/uninit.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

SEC("socket")
__description("read uninitialized register")
__failure __msg("R2 !read_ok")
__failure_unpriv
__naked void read_uninitialized_register(void)
{
asm volatile (" \
r0 = r2; \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("read invalid register")
__failure __msg("R15 is invalid")
__failure_unpriv
__naked void read_invalid_register(void)
{
asm volatile (" \
.8byte %[mov64_reg]; \
exit; \
" :
: __imm_insn(mov64_reg, BPF_MOV64_REG(BPF_REG_0, -1))
: __clobber_all);
}

SEC("socket")
__description("program doesn't init R0 before exit")
__failure __msg("R0 !read_ok")
__failure_unpriv
__naked void t_init_r0_before_exit(void)
{
asm volatile (" \
r2 = r1; \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("program doesn't init R0 before exit in all branches")
__failure __msg("R0 !read_ok")
__msg_unpriv("R1 pointer comparison")
__naked void before_exit_in_all_branches(void)
{
asm volatile (" \
if r1 >= 0 goto l0_%=; \
r0 = 1; \
r0 += 2; \
l0_%=: exit; \
" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";

158 tools/testing/selftests/bpf/progs/verifier_value.c Normal file
@@ -0,0 +1,158 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");

SEC("socket")
__description("map element value store of cleared call register")
__failure __msg("R1 !read_ok")
__failure_unpriv __msg_unpriv("R1 !read_ok")
__naked void store_of_cleared_call_register(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}

SEC("socket")
__description("map element value with unaligned store")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void element_value_with_unaligned_store(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 += 3; \
r1 = 42; \
*(u64*)(r0 + 0) = r1; \
r1 = 43; \
*(u64*)(r0 + 2) = r1; \
r1 = 44; \
*(u64*)(r0 - 2) = r1; \
r8 = r0; \
r1 = 32; \
*(u64*)(r8 + 0) = r1; \
r1 = 33; \
*(u64*)(r8 + 2) = r1; \
r1 = 34; \
*(u64*)(r8 - 2) = r1; \
r8 += 5; \
r1 = 22; \
*(u64*)(r8 + 0) = r1; \
r1 = 23; \
*(u64*)(r8 + 4) = r1; \
r1 = 24; \
*(u64*)(r8 - 7) = r1; \
r7 = r8; \
r7 += 3; \
r1 = 22; \
*(u64*)(r7 + 0) = r1; \
r1 = 23; \
*(u64*)(r7 + 4) = r1; \
r1 = 24; \
*(u64*)(r7 - 4) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}

SEC("socket")
__description("map element value with unaligned load")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void element_value_with_unaligned_load(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
if r1 >= %[max_entries] goto l0_%=; \
r0 += 3; \
r7 = *(u64*)(r0 + 0); \
r7 = *(u64*)(r0 + 2); \
r8 = r0; \
r7 = *(u64*)(r8 + 0); \
r7 = *(u64*)(r8 + 2); \
r0 += 5; \
r7 = *(u64*)(r0 + 0); \
r7 = *(u64*)(r0 + 4); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(max_entries, MAX_ENTRIES)
: __clobber_all);
}

SEC("socket")
__description("map element value is preserved across register spilling")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void is_preserved_across_register_spilling(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 += %[test_val_foo]; \
r1 = 42; \
*(u64*)(r0 + 0) = r1; \
r1 = r10; \
r1 += -184; \
*(u64*)(r1 + 0) = r0; \
r3 = *(u64*)(r1 + 0); \
r1 = 42; \
*(u64*)(r3 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}

char _license[] SEC("license") = "GPL";

78 tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c Normal file
@@ -0,0 +1,78 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value_adj_spill.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");

SEC("socket")
__description("map element value is preserved across register spilling")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void is_preserved_across_register_spilling(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 42; \
*(u64*)(r0 + 0) = r1; \
r1 = r10; \
r1 += -184; \
*(u64*)(r1 + 0) = r0; \
r3 = *(u64*)(r1 + 0); \
r1 = 42; \
*(u64*)(r3 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}

SEC("socket")
__description("map element value or null is marked on register spilling")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void is_marked_on_register_spilling(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
r1 = r10; \
r1 += -152; \
*(u64*)(r1 + 0) = r0; \
if r0 == 0 goto l0_%=; \
r3 = *(u64*)(r1 + 0); \
r1 = 42; \
*(u64*)(r3 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}

char _license[] SEC("license") = "GPL";

288 tools/testing/selftests/bpf/progs/verifier_value_or_null.c Normal file
@@ -0,0 +1,288 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value_or_null.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("tc")
__description("multiple registers share map_lookup_elem result")
__success __retval(0)
__naked void share_map_lookup_elem_result(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("tc")
__description("alu ops on ptr_to_map_value_or_null, 1")
__failure __msg("R4 pointer arithmetic on map_value_or_null")
__naked void map_value_or_null_1(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
r4 += -2; \
r4 += 2; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("tc")
__description("alu ops on ptr_to_map_value_or_null, 2")
__failure __msg("R4 pointer arithmetic on map_value_or_null")
__naked void map_value_or_null_2(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
r4 &= -1; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("tc")
__description("alu ops on ptr_to_map_value_or_null, 3")
__failure __msg("R4 pointer arithmetic on map_value_or_null")
__naked void map_value_or_null_3(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
r4 <<= 1; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("tc")
__description("invalid memory access with multiple map_lookup_elem calls")
__failure __msg("R4 !read_ok")
__naked void multiple_map_lookup_elem_calls(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
r8 = r1; \
r7 = r2; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
r1 = r8; \
r2 = r7; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("tc")
__description("valid indirect map_lookup_elem access with 2nd lookup in branch")
__success __retval(0)
__naked void with_2nd_lookup_in_branch(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
r8 = r1; \
r7 = r2; \
call %[bpf_map_lookup_elem]; \
r2 = 10; \
if r2 != 0 goto l0_%=; \
r1 = r8; \
r2 = r7; \
call %[bpf_map_lookup_elem]; \
l0_%=: r4 = r0; \
if r0 == 0 goto l1_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l1_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("socket")
__description("invalid map access from else condition")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void map_access_from_else_condition(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
if r1 >= %[__imm_0] goto l1_%=; \
r1 += 1; \
l1_%=: r1 <<= 2; \
r0 += r1; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, MAX_ENTRIES-1),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}

SEC("tc")
__description("map lookup and null branch prediction")
__success __retval(0)
__naked void lookup_and_null_branch_prediction(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r6 = r0; \
if r6 == 0 goto l0_%=; \
if r6 != 0 goto l0_%=; \
r10 += 10; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("cgroup/skb")
__description("MAP_VALUE_OR_NULL check_ids() in regsafe()")
__failure __msg("R8 invalid mem access 'map_value_or_null'")
__failure_unpriv __msg_unpriv("")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void null_check_ids_in_regsafe(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
/* r9 = map_lookup_elem(...) */ \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r9 = r0; \
/* r8 = map_lookup_elem(...) */ \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r8 = r0; \
/* r7 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
r7 = r0; \
/* r6 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
r6 = r0; \
/* if r6 > r7 goto +1 ; no new information about the state is derived from\
* ; this check, thus produced verifier states differ\
* ; only in 'insn_idx' \
* r9 = r8 ; optionally share ID between r9 and r8\
*/ \
if r6 > r7 goto l0_%=; \
r9 = r8; \
l0_%=: /* if r9 == 0 goto <exit> */ \
if r9 == 0 goto l1_%=; \
/* read map value via r8, this is not always \
* safe because r8 might be not equal to r9. \
*/ \
r0 = *(u64*)(r8 + 0); \
l1_%=: /* exit 0 */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

char _license[] SEC("license") = "GPL";

349 tools/testing/selftests/bpf/progs/verifier_var_off.c Normal file
@@ -0,0 +1,349 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/var_off.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("lwt_in")
__description("variable-offset ctx access")
__failure __msg("variable ctx access var_off=(0x0; 0x4)")
__naked void variable_offset_ctx_access(void)
{
asm volatile (" \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
/* add it to skb. We now have either &skb->len or\
* &skb->pkt_type, but we don't know which \
*/ \
r1 += r2; \
/* dereference it */ \
r0 = *(u32*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}

SEC("cgroup/skb")
__description("variable-offset stack read, priv vs unpriv")
__success __failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_read_priv_vs_unpriv(void)
{
asm volatile (" \
/* Fill the top 8 bytes of the stack */ \
r0 = 0; \
*(u64*)(r10 - 8) = r0; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
r2 -= 8; \
/* add it to fp. We now have either fp-4 or fp-8, but\
* we don't know which \
*/ \
r2 += r10; \
/* dereference it for a stack read */ \
r0 = *(u32*)(r2 + 0); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}

SEC("lwt_in")
__description("variable-offset stack read, uninitialized")
__failure __msg("invalid variable-offset read from stack R2")
__naked void variable_offset_stack_read_uninitialized(void)
{
asm volatile (" \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
r2 -= 8; \
/* add it to fp. We now have either fp-4 or fp-8, but\
* we don't know which \
*/ \
r2 += r10; \
/* dereference it for a stack read */ \
r0 = *(u32*)(r2 + 0); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("variable-offset stack write, priv vs unpriv")
__success __failure_unpriv
/* Variable stack access is rejected for unprivileged.
*/
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_write_priv_vs_unpriv(void)
{
asm volatile (" \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 8-byte aligned */ \
r2 &= 8; \
r2 -= 16; \
/* Add it to fp. We now have either fp-8 or fp-16, but\
* we don't know which \
*/ \
r2 += r10; \
/* Dereference it for a stack write */ \
r0 = 0; \
*(u64*)(r2 + 0) = r0; \
/* Now read from the address we just wrote. This shows\
* that, after a variable-offset write, a privileged\
* program can read the slots that were in the range of\
* that write (even if the verifier doesn't actually know\
* if the slot being read was really written to or not).\
*/ \
r3 = *(u64*)(r2 + 0); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("variable-offset stack write clobbers spilled regs")
__failure
/* In the privileged case, dereferencing a spilled-and-then-filled
* register is rejected because the previous variable offset stack
* write might have overwritten the spilled pointer (i.e. we lose track
* of the spilled register when we analyze the write).
*/
__msg("R2 invalid mem access 'scalar'")
__failure_unpriv
/* The unprivileged case is not too interesting; variable
* stack access is rejected.
*/
__msg_unpriv("R2 variable stack access prohibited for !root")
__naked void stack_write_clobbers_spilled_regs(void)
{
asm volatile (" \
/* Dummy instruction; needed because we need to patch the next one\
* and we can't patch the first instruction. \
*/ \
r6 = 0; \
/* Make R0 a map ptr */ \
r0 = %[map_hash_8b] ll; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 8-byte aligned */ \
r2 &= 8; \
r2 -= 16; \
/* Add it to fp. We now have either fp-8 or fp-16, but\
* we don't know which. \
*/ \
r2 += r10; \
/* Spill R0(map ptr) into stack */ \
*(u64*)(r10 - 8) = r0; \
/* Dereference the unknown value for a stack write */\
r0 = 0; \
*(u64*)(r2 + 0) = r0; \
/* Fill the register back into R2 */ \
r2 = *(u64*)(r10 - 8); \
/* Try to dereference R2 for a memory load */ \
r0 = *(u64*)(r2 + 8); \
exit; \
" :
: __imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("sockops")
__description("indirect variable-offset stack access, unbounded")
__failure __msg("invalid unbounded variable-offset indirect access to stack R4")
__naked void variable_offset_stack_access_unbounded(void)
{
asm volatile (" \
r2 = 6; \
r3 = 28; \
/* Fill the top 16 bytes of the stack. */ \
r4 = 0; \
*(u64*)(r10 - 16) = r4; \
r4 = 0; \
*(u64*)(r10 - 8) = r4; \
/* Get an unknown value. */ \
r4 = *(u64*)(r1 + %[bpf_sock_ops_bytes_received]);\
/* Check the lower bound but don't check the upper one. */\
if r4 s< 0 goto l0_%=; \
/* Point the lower bound to initialized stack. Offset is now in range\
* from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.\
*/ \
r4 -= 16; \
r4 += r10; \
r5 = 8; \
/* Dereference it indirectly. */ \
call %[bpf_getsockopt]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_getsockopt),
__imm_const(bpf_sock_ops_bytes_received, offsetof(struct bpf_sock_ops, bytes_received))
: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, max out of bound")
__failure __msg("invalid variable-offset indirect access to stack R2")
__naked void access_max_out_of_bound(void)
{
asm volatile (" \
/* Fill the top 8 bytes of the stack */ \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
r2 -= 8; \
/* add it to fp. We now have either fp-4 or fp-8, but\
* we don't know which \
*/ \
r2 += r10; \
/* dereference it indirectly */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, min out of bound")
__failure __msg("invalid variable-offset indirect access to stack R2")
__naked void access_min_out_of_bound(void)
{
asm volatile (" \
/* Fill the top 8 bytes of the stack */ \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
r2 -= 516; \
/* add it to fp. We now have either fp-516 or fp-512, but\
* we don't know which \
*/ \
r2 += r10; \
/* dereference it indirectly */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, min_off < min_initialized")
__failure __msg("invalid indirect read from stack R2 var_off")
__naked void access_min_off_min_initialized(void)
{
asm volatile (" \
/* Fill only the top 8 bytes of the stack. */ \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned. */ \
r2 &= 4; \
r2 -= 16; \
/* Add it to fp. We now have either fp-12 or fp-16, but we don't know\
* which. fp-16 size 8 is partially uninitialized stack.\
*/ \
r2 += r10; \
/* Dereference it indirectly. */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("cgroup/skb")
__description("indirect variable-offset stack access, priv vs unpriv")
__success __failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_access_priv_vs_unpriv(void)
{
asm volatile (" \
/* Fill the top 16 bytes of the stack. */ \
r2 = 0; \
*(u64*)(r10 - 16) = r2; \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value. */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned. */ \
r2 &= 4; \
r2 -= 16; \
/* Add it to fp. We now have either fp-12 or fp-16, we don't know\
* which, but either way it points to initialized stack.\
*/ \
r2 += r10; \
/* Dereference it indirectly. */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, ok")
__success __retval(0)
__naked void variable_offset_stack_access_ok(void)
{
asm volatile (" \
/* Fill the top 16 bytes of the stack. */ \
r2 = 0; \
*(u64*)(r10 - 16) = r2; \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value. */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned. */ \
r2 &= 4; \
r2 -= 16; \
/* Add it to fp. We now have either fp-12 or fp-16, we don't know\
* which, but either way it points to initialized stack.\
*/ \
r2 += r10; \
/* Dereference it indirectly. */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

char _license[] SEC("license") = "GPL";

124 tools/testing/selftests/bpf/progs/verifier_xadd.c Normal file
@@ -0,0 +1,124 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/xadd.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("tc")
__description("xadd/w check unaligned stack")
__failure __msg("misaligned stack access off")
__naked void xadd_w_check_unaligned_stack(void)
{
asm volatile (" \
r0 = 1; \
*(u64*)(r10 - 8) = r0; \
lock *(u32 *)(r10 - 7) += w0; \
r0 = *(u64*)(r10 - 8); \
exit; \
" ::: __clobber_all);
}

SEC("tc")
__description("xadd/w check unaligned map")
__failure __msg("misaligned value access off")
__naked void xadd_w_check_unaligned_map(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r1 = 1; \
lock *(u32 *)(r0 + 3) += w1; \
r0 = *(u32*)(r0 + 3); \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}

SEC("xdp")
__description("xadd/w check unaligned pkt")
__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void xadd_w_check_unaligned_pkt(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 < r3 goto l0_%=; \
r0 = 99; \
goto l1_%=; \
l0_%=: r0 = 1; \
r1 = 0; \
*(u32*)(r2 + 0) = r1; \
r1 = 0; \
*(u32*)(r2 + 3) = r1; \
lock *(u32 *)(r2 + 1) += w0; \
lock *(u32 *)(r2 + 2) += w0; \
r0 = *(u32*)(r2 + 1); \
l1_%=: exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}

SEC("tc")
__description("xadd/w check whether src/dst got mangled, 1")
__success __retval(3)
__naked void src_dst_got_mangled_1(void)
{
asm volatile (" \
r0 = 1; \
r6 = r0; \
r7 = r10; \
*(u64*)(r10 - 8) = r0; \
lock *(u64 *)(r10 - 8) += r0; \
lock *(u64 *)(r10 - 8) += r0; \
if r6 != r0 goto l0_%=; \
if r7 != r10 goto l0_%=; \
r0 = *(u64*)(r10 - 8); \
exit; \
l0_%=: r0 = 42; \
exit; \
" ::: __clobber_all);
}

SEC("tc")
__description("xadd/w check whether src/dst got mangled, 2")
__success __retval(3)
__naked void src_dst_got_mangled_2(void)
{
asm volatile (" \
r0 = 1; \
r6 = r0; \
r7 = r10; \
*(u32*)(r10 - 8) = r0; \
lock *(u32 *)(r10 - 8) += w0; \
lock *(u32 *)(r10 - 8) += w0; \
if r6 != r0 goto l0_%=; \
if r7 != r10 goto l0_%=; \
r0 = *(u32*)(r10 - 8); \
exit; \
l0_%=: r0 = 42; \
exit; \
" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";

24 tools/testing/selftests/bpf/progs/verifier_xdp.c Normal file
@@ -0,0 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/xdp.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("xdp")
__description("XDP, using ifindex from netdev")
__success __retval(1)
__naked void xdp_using_ifindex_from_netdev(void)
{
asm volatile (" \
r0 = 0; \
r2 = *(u32*)(r1 + %[xdp_md_ingress_ifindex]); \
if r2 < 1 goto l0_%=; \
r0 = 1; \
l0_%=: exit; \
" :
: __imm_const(xdp_md_ingress_ifindex, offsetof(struct xdp_md, ingress_ifindex))
: __clobber_all);
}

char _license[] SEC("license") = "GPL";

tools/testing/selftests/bpf/test_loader.c
@@ -1,9 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
#include <test_progs.h>
#include <bpf/btf.h>

#include "autoconf_helper.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"

#define str_has_pfx(str, pfx) \
(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)

@@ -12,16 +17,48 @@
#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
#define TEST_TAG_RETVAL_PFX "comment:test_retval="
#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="

struct test_spec {
const char *name;
/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif

static int sysctl_unpriv_disabled = -1;

enum mode {
PRIV = 1,
UNPRIV = 2
};

struct test_subspec {
char *name;
bool expect_failure;
const char **expect_msgs;
size_t expect_msg_cnt;
int retval;
bool execute;
};

struct test_spec {
const char *prog_name;
struct test_subspec priv;
struct test_subspec unpriv;
int log_level;
int prog_flags;
int mode_mask;
};

static int tester_init(struct test_loader *tester)

@@ -44,17 +81,87 @@ void test_loader_fini(struct test_loader *tester)
free(tester->log_buf);
}

static void free_test_spec(struct test_spec *spec)
{
free(spec->priv.name);
free(spec->unpriv.name);
free(spec->priv.expect_msgs);
free(spec->unpriv.expect_msgs);
}

static int push_msg(const char *msg, struct test_subspec *subspec)
{
void *tmp;

tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *));
if (!tmp) {
ASSERT_FAIL("failed to realloc memory for messages\n");
return -ENOMEM;
}
subspec->expect_msgs = tmp;
subspec->expect_msgs[subspec->expect_msg_cnt++] = msg;

return 0;
}

static int parse_int(const char *str, int *val, const char *name)
{
char *end;
long tmp;

errno = 0;
if (str_has_pfx(str, "0x"))
tmp = strtol(str + 2, &end, 16);
else
tmp = strtol(str, &end, 10);
if (errno || end[0] != '\0') {
PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
return -EINVAL;
}
*val = tmp;
return 0;
}

static int parse_retval(const char *str, int *val, const char *name)
{
struct {
char *name;
int val;
} named_values[] = {
{ "INT_MIN", INT_MIN },
{ "POINTER_VALUE", POINTER_VALUE },
{ "TEST_DATA_LEN", TEST_DATA_LEN },
};
int i;

for (i = 0; i < ARRAY_SIZE(named_values); ++i) {
if (strcmp(str, named_values[i].name) != 0)
continue;
*val = named_values[i].val;
return 0;
}

return parse_int(str, val, name);
}
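
parse_retval() means a `__retval()` annotation can name one of the three symbolic constants instead of a literal number. A hypothetical test using it (the name and body here are illustrative only, not from the series):

SEC("socket")
__description("hypothetical: symbolic expected return value")
__success __retval(INT_MIN)	/* resolved via parse_retval()'s named_values table */
__naked void symbolic_retval_sketch(void)
{
	asm volatile (" \
	r0 = %[int_min]; \
	exit; \
" :
	: __imm_const(int_min, INT_MIN)
	: __clobber_all);
}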

/* Uses btf_decl_tag attributes to describe the expected test
* behavior, see bpf_misc.h for detailed description of each attribute
* and attribute combinations.
*/
static int parse_test_spec(struct test_loader *tester,
struct bpf_object *obj,
struct bpf_program *prog,
struct test_spec *spec)
{
const char *description = NULL;
bool has_unpriv_result = false;
bool has_unpriv_retval = false;
int func_id, i, err = 0;
struct btf *btf;
int func_id, i;

memset(spec, 0, sizeof(*spec));

spec->name = bpf_program__name(prog);
spec->prog_name = bpf_program__name(prog);

btf = bpf_object__btf(obj);
if (!btf) {
@@ -62,16 +169,16 @@ static int parse_test_spec(struct test_loader *tester,
return -EINVAL;
}

func_id = btf__find_by_name_kind(btf, spec->name, BTF_KIND_FUNC);
func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC);
if (func_id < 0) {
ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->name);
ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name);
return -EINVAL;
}

for (i = 1; i < btf__type_cnt(btf); i++) {
const char *s, *val, *msg;
const struct btf_type *t;
const char *s, *val;
char *e;
int tmp;

t = btf__type_by_id(btf, i);
if (!btf_is_decl_tag(t))
@@ -81,31 +188,54 @@ static int parse_test_spec(struct test_loader *tester,
continue;

s = btf__str_by_offset(btf, t->name_off);
if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
spec->expect_failure = true;
if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) {
description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1;
} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
spec->priv.expect_failure = true;
spec->mode_mask |= PRIV;
} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) {
spec->expect_failure = false;
spec->priv.expect_failure = false;
spec->mode_mask |= PRIV;
} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) {
spec->unpriv.expect_failure = true;
spec->mode_mask |= UNPRIV;
has_unpriv_result = true;
} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) {
spec->unpriv.expect_failure = false;
spec->mode_mask |= UNPRIV;
has_unpriv_result = true;
} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
void *tmp;
const char **msg;

tmp = realloc(spec->expect_msgs,
(1 + spec->expect_msg_cnt) * sizeof(void *));
if (!tmp) {
ASSERT_FAIL("failed to realloc memory for messages\n");
return -ENOMEM;
}
spec->expect_msgs = tmp;
msg = &spec->expect_msgs[spec->expect_msg_cnt++];
*msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
err = push_msg(msg, &spec->priv);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
err = push_msg(msg, &spec->unpriv);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) {
val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1;
err = parse_retval(val, &spec->priv.retval, "__retval");
if (err)
goto cleanup;
spec->priv.execute = true;
spec->mode_mask |= PRIV;
} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) {
val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1;
err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
spec->unpriv.execute = true;
has_unpriv_retval = true;
} else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) {
val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1;
errno = 0;
spec->log_level = strtol(val, &e, 0);
if (errno || e[0] != '\0') {
ASSERT_FAIL("failed to parse test log level from '%s'", s);
return -EINVAL;
}
err = parse_int(val, &spec->log_level, "test log level");
if (err)
goto cleanup;
} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;
if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
@@ -121,17 +251,74 @@ static int parse_test_spec(struct test_loader *tester,
} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
spec->prog_flags |= BPF_F_XDP_HAS_FRAGS;
} else /* assume numeric value */ {
errno = 0;
spec->prog_flags |= strtol(val, &e, 0);
if (errno || e[0] != '\0') {
ASSERT_FAIL("failed to parse test prog flags from '%s'", s);
return -EINVAL;
}
err = parse_int(val, &tmp, "test prog flags");
if (err)
goto cleanup;
spec->prog_flags |= tmp;
}
}
}

if (spec->mode_mask == 0)
spec->mode_mask = PRIV;

if (!description)
description = spec->prog_name;

if (spec->mode_mask & PRIV) {
spec->priv.name = strdup(description);
if (!spec->priv.name) {
PRINT_FAIL("failed to allocate memory for priv.name\n");
err = -ENOMEM;
goto cleanup;
}
}

if (spec->mode_mask & UNPRIV) {
int descr_len = strlen(description);
const char *suffix = " @unpriv";
char *name;

name = malloc(descr_len + strlen(suffix) + 1);
if (!name) {
PRINT_FAIL("failed to allocate memory for unpriv.name\n");
err = -ENOMEM;
goto cleanup;
}

strcpy(name, description);
strcpy(&name[descr_len], suffix);
spec->unpriv.name = name;
}

if (spec->mode_mask & (PRIV | UNPRIV)) {
if (!has_unpriv_result)
spec->unpriv.expect_failure = spec->priv.expect_failure;

if (!has_unpriv_retval) {
spec->unpriv.retval = spec->priv.retval;
spec->unpriv.execute = spec->priv.execute;
}

if (!spec->unpriv.expect_msgs) {
size_t sz = spec->priv.expect_msg_cnt * sizeof(void *);

spec->unpriv.expect_msgs = malloc(sz);
if (!spec->unpriv.expect_msgs) {
PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n");
err = -ENOMEM;
goto cleanup;
}
memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz);
spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt;
}
}

return 0;

cleanup:
free_test_spec(spec);
return err;
}

static void prepare_case(struct test_loader *tester,
@@ -148,7 +335,7 @@ static void prepare_case(struct test_loader *tester,

bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz);

/* Make sure we set at least minimal log level, unless test requirest
/* Make sure we set at least minimal log level, unless test requires
* even higher level already. Make sure to preserve independent log
* level 4 (verifier stats), though.
*/
@@ -172,18 +359,18 @@ static void emit_verifier_log(const char *log_buf, bool force)
}

static void validate_case(struct test_loader *tester,
struct test_spec *spec,
struct test_subspec *subspec,
struct bpf_object *obj,
struct bpf_program *prog,
int load_err)
{
int i, j;

for (i = 0; i < spec->expect_msg_cnt; i++) {
for (i = 0; i < subspec->expect_msg_cnt; i++) {
char *match;
const char *expect_msg;

expect_msg = spec->expect_msgs[i];
expect_msg = subspec->expect_msgs[i];

match = strstr(tester->log_buf + tester->next_match_pos, expect_msg);
if (!ASSERT_OK_PTR(match, "expect_msg")) {
@@ -191,7 +378,8 @@ static void validate_case(struct test_loader *tester,
if (env.verbosity == VERBOSE_NONE)
emit_verifier_log(tester->log_buf, true /*force*/);
for (j = 0; j < i; j++)
fprintf(stderr, "MATCHED MSG: '%s'\n", spec->expect_msgs[j]);
fprintf(stderr,
"MATCHED MSG: '%s'\n", subspec->expect_msgs[j]);
fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg);
return;
}
@@ -200,17 +388,229 @@ static void validate_case(struct test_loader *tester,
}
}

struct cap_state {
__u64 old_caps;
bool initialized;
};

static int drop_capabilities(struct cap_state *caps)
{
const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN |
1ULL << CAP_PERFMON | 1ULL << CAP_BPF);
int err;

err = cap_disable_effective(caps_to_drop, &caps->old_caps);
if (err) {
PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(err));
return err;
}

caps->initialized = true;
return 0;
}

static int restore_capabilities(struct cap_state *caps)
{
int err;

if (!caps->initialized)
return 0;

err = cap_enable_effective(caps->old_caps, NULL);
if (err)
PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(err));
caps->initialized = false;
return err;
}

static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec)
{
if (sysctl_unpriv_disabled < 0)
sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0;
if (sysctl_unpriv_disabled)
return false;
if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS)
return false;
return true;
}

static bool is_unpriv_capable_map(struct bpf_map *map)
{
enum bpf_map_type type;
__u32 flags;

type = bpf_map__type(map);

switch (type) {
case BPF_MAP_TYPE_HASH:
case BPF_MAP_TYPE_PERCPU_HASH:
case BPF_MAP_TYPE_HASH_OF_MAPS:
flags = bpf_map__map_flags(map);
return !(flags & BPF_F_ZERO_SEED);
case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
case BPF_MAP_TYPE_ARRAY:
case BPF_MAP_TYPE_RINGBUF:
case BPF_MAP_TYPE_PROG_ARRAY:
case BPF_MAP_TYPE_CGROUP_ARRAY:
case BPF_MAP_TYPE_PERCPU_ARRAY:
case BPF_MAP_TYPE_USER_RINGBUF:
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_CGROUP_STORAGE:
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
return true;
default:
return false;
}
}

static int do_prog_test_run(int fd_prog, int *retval)
{
__u8 tmp_out[TEST_DATA_LEN << 2] = {};
__u8 tmp_in[TEST_DATA_LEN] = {};
int err, saved_errno;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = tmp_in,
.data_size_in = sizeof(tmp_in),
.data_out = tmp_out,
.data_size_out = sizeof(tmp_out),
.repeat = 1,
);

err = bpf_prog_test_run_opts(fd_prog, &topts);
saved_errno = errno;

if (err) {
PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
saved_errno, strerror(saved_errno));
return err;
}

ASSERT_OK(0, "bpf_prog_test_run");
*retval = topts.retval;

return 0;
}

static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec)
{
if (!subspec->execute)
return false;

if (subspec->expect_failure)
return false;

if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) {
if (env.verbosity != VERBOSE_NONE)
printf("alignment prevents execution\n");
return false;
}

return true;
}

/* this function is forced noinline and has short generic name to look better
* in test_progs output (in case of a failure)
*/
static noinline
void run_subtest(struct test_loader *tester,
const char *skel_name,
skel_elf_bytes_fn elf_bytes_factory)
struct bpf_object_open_opts *open_opts,
const void *obj_bytes,
size_t obj_byte_cnt,
struct test_spec *spec,
bool unpriv)
{
struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
struct cap_state caps = {};
struct bpf_program *tprog;
struct bpf_object *tobj;
struct bpf_map *map;
int retval;
int err;

if (!test__start_subtest(subspec->name))
return;

if (unpriv) {
if (!can_execute_unpriv(tester, spec)) {
test__skip();
test__end_subtest();
return;
}
if (drop_capabilities(&caps)) {
test__end_subtest();
return;
}
}

tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
goto subtest_cleanup;

bpf_object__for_each_program(tprog, tobj)
bpf_program__set_autoload(tprog, false);

bpf_object__for_each_program(tprog, tobj) {
/* only load specified program */
if (strcmp(bpf_program__name(tprog), spec->prog_name) == 0) {
bpf_program__set_autoload(tprog, true);
break;
}
}

prepare_case(tester, spec, tobj, tprog);

/* By default bpf_object__load() automatically creates all
* maps declared in the skeleton. Some map types are only
* allowed in priv mode. Disable autoload for such maps in
* unpriv mode.
*/
bpf_object__for_each_map(map, tobj)
bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));

err = bpf_object__load(tobj);
if (subspec->expect_failure) {
if (!ASSERT_ERR(err, "unexpected_load_success")) {
emit_verifier_log(tester->log_buf, false /*force*/);
goto tobj_cleanup;
}
} else {
if (!ASSERT_OK(err, "unexpected_load_failure")) {
emit_verifier_log(tester->log_buf, true /*force*/);
goto tobj_cleanup;
}
}

emit_verifier_log(tester->log_buf, false /*force*/);
validate_case(tester, subspec, tobj, tprog, err);

if (should_do_test_run(spec, subspec)) {
/* For some reason test_verifier executes programs
* with all capabilities restored. Do the same here.
*/
if (!restore_capabilities(&caps))
goto tobj_cleanup;

do_prog_test_run(bpf_program__fd(tprog), &retval);
if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
goto tobj_cleanup;
}
}

tobj_cleanup:
bpf_object__close(tobj);
subtest_cleanup:
test__end_subtest();
restore_capabilities(&caps);
}

static void process_subtest(struct test_loader *tester,
const char *skel_name,
skel_elf_bytes_fn elf_bytes_factory)
{
LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name);
struct bpf_object *obj = NULL, *tobj;
struct bpf_program *prog, *tprog;
struct bpf_object *obj = NULL;
struct bpf_program *prog;
const void *obj_bytes;
size_t obj_byte_cnt;
int err;
@@ -224,52 +624,22 @@ void run_subtest(struct test_loader *tester,
return;

bpf_object__for_each_program(prog, obj) {
const char *prog_name = bpf_program__name(prog);
struct test_spec spec;

if (!test__start_subtest(prog_name))
continue;

/* if we can't derive test specification, go to the next test */
err = parse_test_spec(tester, obj, prog, &spec);
if (!ASSERT_OK(err, "parse_test_spec"))
if (err) {
PRINT_FAIL("Can't parse test spec for program '%s'\n",
bpf_program__name(prog));
continue;

tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts);
if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
continue;

bpf_object__for_each_program(tprog, tobj)
bpf_program__set_autoload(tprog, false);

bpf_object__for_each_program(tprog, tobj) {
/* only load specified program */
if (strcmp(bpf_program__name(tprog), prog_name) == 0) {
bpf_program__set_autoload(tprog, true);
break;
}
}

prepare_case(tester, &spec, tobj, tprog);
if (spec.mode_mask & PRIV)
run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, false);
if (spec.mode_mask & UNPRIV)
run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, true);

err = bpf_object__load(tobj);
if (spec.expect_failure) {
if (!ASSERT_ERR(err, "unexpected_load_success")) {
emit_verifier_log(tester->log_buf, false /*force*/);
goto tobj_cleanup;
}
} else {
if (!ASSERT_OK(err, "unexpected_load_failure")) {
emit_verifier_log(tester->log_buf, true /*force*/);
goto tobj_cleanup;
}
}

emit_verifier_log(tester->log_buf, false /*force*/);
validate_case(tester, &spec, tobj, tprog, err);

tobj_cleanup:
bpf_object__close(tobj);
free_test_spec(&spec);
}

bpf_object__close(obj);
@@ -280,5 +650,5 @@ void test_loader__run_subtests(struct test_loader *tester,
skel_elf_bytes_fn elf_bytes_factory)
{
/* see comment in run_subtest() for why we do this function nesting */
run_subtest(tester, skel_name, elf_bytes_factory);
process_subtest(tester, skel_name, elf_bytes_factory);
}
|
||||
|
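For context, each migrated progs/verifier_*.c object is driven from test_progs by a small entry in prog_tests/verifier.c that feeds the skeleton's ELF bytes into the loader above. A minimal sketch of such an entry, assuming the RUN_TESTS() convenience macro from test_loader.h and an illustrative verifier_foo skeleton:

/* Hypothetical prog_tests/verifier.c entry (names illustrative).
 * RUN_TESTS() hands the skeleton's ELF bytes to
 * test_loader__run_subtests(), which walks every program in the
 * object via process_subtest() above.
 */
#include "verifier_foo.skel.h"	/* illustrative skeleton name */

void test_verifier_foo(void)
{
	RUN_TESTS(verifier_foo);
}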
@ -33,13 +33,8 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "autoconf_helper.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"
#include "bpf_rand.h"
#include "bpf_util.h"
@ -1665,22 +1660,6 @@ static bool is_admin(void)
	return (caps & ADMIN_CAPS) == ADMIN_CAPS;
}

static void get_unpriv_disabled()
{
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
26 tools/testing/selftests/bpf/unpriv_helpers.c Normal file
@ -0,0 +1,26 @@
// SPDX-License-Identifier: GPL-2.0-only

#include <stdbool.h>
#include <stdlib.h>
#include <error.h>
#include <stdio.h>

#include "unpriv_helpers.h"

bool get_unpriv_disabled(void)
{
	bool disabled;
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/" UNPRIV_SYSCTL, "r");
	if (fd) {
		disabled = (fgets(buf, 2, fd) == buf && atoi(buf));
		fclose(fd);
	} else {
		perror("fopen /proc/sys/" UNPRIV_SYSCTL);
		disabled = true;
	}

	return disabled;
}
7 tools/testing/selftests/bpf/unpriv_helpers.h Normal file
@ -0,0 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only

#include <stdbool.h>

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"

bool get_unpriv_disabled(void);
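A minimal usage sketch for the new helper (the caller below is invented for illustration): a harness probes the sysctl once up front and skips unprivileged test variants when it reports true.

#include <stdbool.h>
#include <stdio.h>

#include "unpriv_helpers.h"

int main(void)
{
	/* Probe kernel.unprivileged_bpf_disabled once and gate
	 * unprivileged test variants on the result.
	 */
	bool unpriv_disabled = get_unpriv_disabled();

	if (unpriv_disabled)
		fprintf(stderr, "skipping unpriv tests: %s is set\n",
			UNPRIV_SYSCTL);
	return 0;
}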
@ -1,68 +0,0 @@
{
	"invalid and of negative number",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R0 max value is outside of the allowed memory range",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid range check",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
	BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
	BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
	BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
	BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
	BPF_MOV32_IMM(BPF_REG_3, 1),
	BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
	BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
	BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
	BPF_MOV64_REG(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R0 max value is outside of the allowed memory range",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check known subreg with unknown reg",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xFFFF1234),
	/* Upper bits are unknown but AND above masks out 1 zero'ing lower bits */
	BPF_JMP32_IMM(BPF_JLT, BPF_REG_0, 1, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 512),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 !read_ok",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 0
},
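For reference, a rough sketch of what migration of an entry such as "check known subreg with unknown reg" above could look like in progs/verifier_*.c form; the function name is illustrative, and the __description/__success/__msg_unpriv/__imm/__clobber_all annotations are assumed to come from progs/bpf_misc.h:

SEC("socket")
__description("check known subreg with unknown reg")
__success __retval(0)
__failure_unpriv __msg_unpriv("R1 !read_ok")
__naked void known_subreg_with_unknown_reg(void)
{
	/* Upper bits are unknown, but the AND clears the low bit set
	 * by the += 1, so w0 is known zero and the branch is taken.
	 */
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 <<= 32;					\
	r0 += 1;					\
	r0 &= 0xFFFF1234;				\
	if w0 < 1 goto l0_%=;				\
	r1 = *(u32*)(r1 + 512);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}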
@ -1,379 +0,0 @@
{
	"valid map access into an array with a constant",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"valid map access into an array with a register",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_IMM(BPF_REG_1, 4),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"valid map access into an array with a variable",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"valid map access into an array with a signed variable",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP32_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
	BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid map access into an array with a constant",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
		   offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=48 size=8",
	.result = REJECT,
},
{
	"invalid map access into an array with a register",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R0 min value is outside of the allowed memory range",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid map access into an array with a variable",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R0 unbounded memory access, make sure to bounds check any such access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid map access into an array with no floor check",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
	BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.errstr = "R0 unbounded memory access",
	.result_unpriv = REJECT,
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid map access into an array with a invalid max check",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.errstr = "invalid access to map value, value_size=48 off=44 size=8",
	.result_unpriv = REJECT,
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid map access into an array with a invalid max check",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3, 11 },
	.errstr = "R0 pointer += pointer",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"valid read map access into a read-only array 1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_ro = { 3 },
	.result = ACCEPT,
	.retval = 28,
},
{
	"valid read map access into a read-only array 2",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_csum_diff),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_array_ro = { 3 },
	.result = ACCEPT,
	.retval = 65507,
},
{
	"invalid write map access into a read-only array 1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_ro = { 3 },
	.result = REJECT,
	.errstr = "write into map forbidden",
},
{
	"invalid write map access into a read-only array 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_skb_load_bytes),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_array_ro = { 4 },
	.result = REJECT,
	.errstr = "write into map forbidden",
},
{
	"valid write map access into a write-only array 1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_wo = { 3 },
	.result = ACCEPT,
	.retval = 1,
},
{
	"valid write map access into a write-only array 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_skb_load_bytes),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_array_wo = { 4 },
	.result = ACCEPT,
	.retval = 0,
},
{
	"invalid read map access into a write-only array 1",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_wo = { 3 },
	.result = REJECT,
	.errstr = "read from map forbidden",
},
{
	"invalid read map access into a write-only array 2",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_csum_diff),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_array_wo = { 3 },
	.result = REJECT,
	.errstr = "read from map forbidden",
},
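For the map-based entries, the migrated form declares the fixup map directly in the BPF object and references it through a 64-bit immediate load instead of a BPF_LD_MAP_FD fixup. A rough sketch for the first entry above; the map and struct definitions are written out here as assumptions (in-tree they live in shared headers), and the function name is illustrative:

#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

/* Assumed layout matching the 48-byte value of fixup_map_hash_48b. */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

SEC("socket")
__description("valid map access into an array with a constant")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void array_with_a_constant(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = %[test_val_foo];				\
	*(u64*)(r0 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";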
@ -1,64 +0,0 @@
{
	"stack out of bounds",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid write to stack",
	.result = REJECT,
},
{
	"uninitialized stack1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 2 },
	.errstr = "invalid indirect read from stack",
	.result = REJECT,
},
{
	"uninitialized stack2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid read from stack",
	.result = REJECT,
},
{
	"invalid fp arithmetic",
	/* If this gets ever changed, make sure JITs can deal with it. */
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 subtraction from stack pointer",
	.result = REJECT,
},
{
	"non-invalid fp arithmetic",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"misaligned read from stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned stack access",
	.result = REJECT,
},
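Immediate stores such as the BPF_ST_MEM above end up expressed through a scratch register in the inline-asm form. A sketch of the first entry after migration (function name illustrative, annotations assumed from progs/bpf_misc.h):

SEC("socket")
__description("stack out of bounds")
__failure __msg("invalid write to stack")
__naked void stack_out_of_bounds(void)
{
	/* Store above the frame pointer: rejected by the verifier. */
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 + 8) = r1;				\
	exit;						\
"	::: __clobber_all);
}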
@ -1,136 +0,0 @@
{
	"check deducing bounds from const, 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
	.errstr = "R0 tried to subtract pointer from scalar",
	.result = REJECT,
},
{
	"check deducing bounds from const, 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 1,
},
{
	"check deducing bounds from const, 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
	.errstr = "R0 tried to subtract pointer from scalar",
	.result = REJECT,
},
{
	"check deducing bounds from const, 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R6 has pointer with unsupported alu operation",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"check deducing bounds from const, 5",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
	.errstr = "R0 tried to subtract pointer from scalar",
	.result = REJECT,
},
{
	"check deducing bounds from const, 6",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
	.errstr = "R0 tried to subtract pointer from scalar",
	.result = REJECT,
},
{
	"check deducing bounds from const, 7",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, ~0),
	BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
	.errstr = "dereference of modified ctx ptr",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check deducing bounds from const, 8",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, ~0),
	BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
	.errstr = "negative offset ctx ptr R1 off=-1 disallowed",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check deducing bounds from const, 9",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
	.errstr = "R0 tried to subtract pointer from scalar",
	.result = REJECT,
},
{
	"check deducing bounds from const, 10",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
	/* Marks reg as unknown. */
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
	.result = REJECT,
},
@ -1,411 +0,0 @@
{
	"bounds checks mixing signed and unsigned, positive bounds",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 2),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 2",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
	BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 3",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
	BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 4",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.result = ACCEPT,
},
{
	"bounds checks mixing signed and unsigned, variant 5",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 6",
	.insns = {
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_6, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R4 min value is negative, either use unsigned",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 7",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.result = ACCEPT,
},
{
	"bounds checks mixing signed and unsigned, variant 8",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 9",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.result = ACCEPT,
},
{
	"bounds checks mixing signed and unsigned, variant 10",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 11",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
	/* Dead branch. */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 12",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -6),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 13",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 2),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
	BPF_MOV64_IMM(BPF_REG_7, 1),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 14",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_MOV64_IMM(BPF_REG_8, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
	BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
	BPF_JMP_IMM(BPF_JA, 0, 0, -7),
	},
	.fixup_map_hash_8b = { 6 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 15",
	.insns = {
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -6),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
@ -1,73 +0,0 @@
{
	"unreachable",
	.insns = {
	BPF_EXIT_INSN(),
	BPF_EXIT_INSN(),
	},
	.errstr = "unreachable",
	.result = REJECT,
},
{
	"unreachable2",
	.insns = {
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unreachable",
	.result = REJECT,
},
{
	"out of range jump",
	.insns = {
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"out of range jump2",
	.insns = {
	BPF_JMP_IMM(BPF_JA, 0, 0, -2),
	BPF_EXIT_INSN(),
	},
	.errstr = "jump out of range",
	.result = REJECT,
},
{
	"loop (back-edge)",
	.insns = {
	BPF_JMP_IMM(BPF_JA, 0, 0, -1),
	BPF_EXIT_INSN(),
	},
	.errstr = "unreachable insn 1",
	.errstr_unpriv = "back-edge",
	.result = REJECT,
},
{
	"loop2 (back-edge)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -4),
	BPF_EXIT_INSN(),
	},
	.errstr = "unreachable insn 4",
	.errstr_unpriv = "back-edge",
	.result = REJECT,
},
{
	"conditional loop",
	.insns = {
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
	BPF_EXIT_INSN(),
	},
	.errstr = "infinite loop detected",
	.errstr_unpriv = "back-edge",
	.result = REJECT,
},
@ -1,72 +0,0 @@
{
	"bpf_exit with invalid return code. test1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R0 has value (0x0; 0xffffffff)",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"bpf_exit with invalid return code. test2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"bpf_exit with invalid return code. test3",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "R0 has value (0x0; 0x3)",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"bpf_exit with invalid return code. test4",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"bpf_exit with invalid return code. test5",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "R0 has value (0x2; 0x0)",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"bpf_exit with invalid return code. test6",
	.insns = {
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr = "R0 is not a known value (ctx)",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"bpf_exit with invalid return code. test7",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
	BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr = "R0 has unknown scalar value",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
@ -1,197 +0,0 @@
{
	"direct packet read test#1 for CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct __sk_buff, queue_mapping)),
	BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
		    offsetof(struct __sk_buff, protocol)),
	BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_present)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "invalid bpf_context access off=76 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"direct packet read test#2 for CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_tci)),
	BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_proto)),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, priority)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
		    offsetof(struct __sk_buff, priority)),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct __sk_buff, ingress_ifindex)),
	BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"direct packet read test#3 for CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
		    offsetof(struct __sk_buff, napi_id)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"direct packet read test#4 for CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, family)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip4)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip4)),
	BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[3])),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[3])),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_port)),
	BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
		    offsetof(struct __sk_buff, local_port)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid access of tc_classid for CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_classid)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid access of data_meta for CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data_meta)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid access of flow_keys for CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, flow_keys)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid write access to napi_id for CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
		    offsetof(struct __sk_buff, napi_id)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
		    offsetof(struct __sk_buff, napi_id)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write tstamp from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "invalid bpf_context access off=152 size=8",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read tstamp from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
@ -1,220 +0,0 @@
{
	"valid cgroup storage access",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_cgroup_storage = { 1 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid cgroup storage access 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1 },
	.result = REJECT,
	.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid cgroup storage access 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "fd 1 is not pointing to valid bpf_map",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid cgroup storage access 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_cgroup_storage = { 1 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=64 off=256 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid cgroup storage access 4",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_cgroup_storage = { 1 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid cgroup storage access 5",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 7),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_cgroup_storage = { 1 },
	.result = REJECT,
	.errstr = "get_local_storage() doesn't support non-zero flags",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid cgroup storage access 6",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_cgroup_storage = { 1 },
	.result = REJECT,
	.errstr = "get_local_storage() doesn't support non-zero flags",
	.errstr_unpriv = "R2 leaks addr into helper function",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"valid per-cpu cgroup storage access",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_percpu_cgroup_storage = { 1 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid per-cpu cgroup storage access 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1 },
	.result = REJECT,
	.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid per-cpu cgroup storage access 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "fd 1 is not pointing to valid bpf_map",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid per-cpu cgroup storage access 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_percpu_cgroup_storage = { 1 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=64 off=256 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid per-cpu cgroup storage access 4",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_cgroup_storage = { 1 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid per-cpu cgroup storage access 5",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 7),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_percpu_cgroup_storage = { 1 },
	.result = REJECT,
	.errstr = "get_local_storage() doesn't support non-zero flags",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"invalid per-cpu cgroup storage access 6",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_percpu_cgroup_storage = { 1 },
	.result = REJECT,
	.errstr = "get_local_storage() doesn't support non-zero flags",
	.errstr_unpriv = "R2 leaks addr into helper function",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
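The pattern these storage tests exercise looks roughly like this in C (a sketch with illustrative map and function names): the helper requires a cgroup storage map, and its flags argument must be zero.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} cg_storage SEC(".maps");

SEC("cgroup_skb/egress")
int use_storage(struct __sk_buff *skb)
{
	/* flags must be 0; the returned pointer is never NULL for
	 * cgroup storage, so it may be dereferenced directly
	 */
	__u64 *val = bpf_get_local_storage(&cg_storage, 0);

	return *val & 1;	/* mirrors the trailing AND r0, 1 above */
}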
@ -1,60 +0,0 @@
{
	"constant register |= constant should keep constant type",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
	BPF_MOV64_IMM(BPF_REG_2, 34),
	BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"constant register |= constant should not bypass stack boundary checks",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
	BPF_MOV64_IMM(BPF_REG_2, 34),
	BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid indirect access to stack R1 off=-48 size=58",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"constant register |= constant register should keep constant type",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
	BPF_MOV64_IMM(BPF_REG_2, 34),
	BPF_MOV64_IMM(BPF_REG_4, 13),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"constant register |= constant register should not bypass stack boundary checks",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
	BPF_MOV64_IMM(BPF_REG_2, 34),
	BPF_MOV64_IMM(BPF_REG_4, 24),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid indirect access to stack R1 off=-48 size=58",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
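The arithmetic behind these four tests: the destination buffer sits at fp-48, so at most 48 bytes may be read into it, and ORing two known constants yields another known constant that the verifier checks exactly. A plain userspace C check of the two results (sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* 0b100010 | 0b001101 == 0b101111: 47 <= 48, so ACCEPT */
	printf("34 | 13 = %d\n", 34 | 13);	/* 47 */
	/* 0b100010 | 0b011000 == 0b111010: 58 > 48, matching the
	 * "off=-48 size=58" rejection
	 */
	printf("34 | 24 = %d\n", 34 | 24);	/* 58 */
	return 0;
}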
@ -1,181 +0,0 @@
{
	"valid access family in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, family)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
	"valid access remote_ip4 in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, remote_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
	"valid access local_ip4 in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, local_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
	"valid access remote_port in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, remote_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
	"valid access local_port in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, local_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
	"valid access remote_ip6 in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, remote_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, remote_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, remote_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, remote_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access local_ip6 in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, local_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, local_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, local_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, local_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access size in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct sk_msg_md, size)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
	"invalid 64B read of size in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
		    offsetof(struct sk_msg_md, size)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid read past end of SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct sk_msg_md, size) + 4),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
	"invalid read offset in SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct sk_msg_md, family) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"direct packet read for SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
		    offsetof(struct sk_msg_md, data)),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
		    offsetof(struct sk_msg_md, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
	"direct packet write for SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
		    offsetof(struct sk_msg_md, data)),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
		    offsetof(struct sk_msg_md, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
	"overlapping checks for direct packet access SK_MSG",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
		    offsetof(struct sk_msg_md, data)),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
		    offsetof(struct sk_msg_md, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_MSG,
},
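The direct packet access tests above correspond to the usual C pattern for SK_MSG programs (sketch; names and the drop policy are illustrative): the comparison against data_end is what makes the subsequent access verifiable.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int msg_read(struct sk_msg_md *msg)
{
	void *data = msg->data;
	void *data_end = msg->data_end;

	if (data + 8 > data_end)	/* the JGT bounds check above */
		return SK_PASS;
	/* the first byte is now provably in bounds */
	return *(char *)data == 0x7f ? SK_DROP : SK_PASS;
}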
@ -1,40 +0,0 @@
{
	"direct stack access with 32-bit wraparound. test1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "fp pointer and 2147483647",
	.result = REJECT
},
{
	"direct stack access with 32-bit wraparound. test2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "fp pointer and 1073741823",
	.result = REJECT
},
{
	"direct stack access with 32-bit wraparound. test3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "fp pointer offset 1073741822",
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.result = REJECT
},
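The constants here straddle the 32-bit boundary, and checking the sums explains the three distinct error strings (plain userspace C, for the arithmetic only):

#include <stdio.h>

int main(void)
{
	/* test1/test2: the very first BPF_ADD is itself rejected */
	printf("%d\n", 0x7fffffff);	/* 2147483647 */
	printf("%d\n", 0x3fffffff);	/* 1073741823 */
	/* test3: each add passes individually, but the accumulated
	 * offset is out of range
	 */
	printf("%ld\n", 0x1fffffffL + 0x1fffffffL);	/* 1073741822 */
	return 0;
}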
@ -1,184 +0,0 @@
{
	"DIV32 by 0, zero check 1",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_2, 1),
	BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"DIV32 by 0, zero check 2",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
	BPF_MOV32_IMM(BPF_REG_2, 1),
	BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"DIV64 by 0, zero check",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_2, 1),
	BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"MOD32 by 0, zero check 1",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_2, 1),
	BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"MOD32 by 0, zero check 2",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
	BPF_MOV32_IMM(BPF_REG_2, 1),
	BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"MOD64 by 0, zero check",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_2, 1),
	BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"DIV32 by 0, zero check ok, cls",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_MOV32_IMM(BPF_REG_1, 2),
	BPF_MOV32_IMM(BPF_REG_2, 16),
	BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 8,
},
{
	"DIV32 by 0, zero check 1, cls",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_0, 1),
	BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 0,
},
{
	"DIV32 by 0, zero check 2, cls",
	.insns = {
	BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
	BPF_MOV32_IMM(BPF_REG_0, 1),
	BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 0,
},
{
	"DIV64 by 0, zero check, cls",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_0, 1),
	BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 0,
},
{
	"MOD32 by 0, zero check ok, cls",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, 42),
	BPF_MOV32_IMM(BPF_REG_1, 3),
	BPF_MOV32_IMM(BPF_REG_2, 5),
	BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 2,
},
{
	"MOD32 by 0, zero check 1, cls",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_0, 1),
	BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	"MOD32 by 0, zero check 2, cls",
	.insns = {
	BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
	BPF_MOV32_IMM(BPF_REG_0, 1),
	BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	"MOD64 by 0, zero check 1, cls",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_0, 2),
	BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 2,
},
{
	"MOD64 by 0, zero check 2, cls",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_0, -1),
	BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = -1,
},
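All the retvals above follow from BPF's defined semantics: division and modulo by zero do not trap; DIV produces 0 and MOD leaves the destination operand unchanged. A sketch of what the runtime effectively computes (helper names are illustrative):

#include <stdint.h>

/* 32-bit shown; the BPF_ALU64 cases behave the same way */
static uint32_t bpf_div32(uint32_t dst, uint32_t src)
{
	return src ? dst / src : 0;	/* "zero check 1, cls": 1/0 == 0 */
}

static uint32_t bpf_mod32(uint32_t dst, uint32_t src)
{
	return src ? dst % src : dst;	/* "MOD32 ... 1, cls": 1%0 == 1 */
}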
@ -1,110 +0,0 @@
/* Just make sure that JITs used udiv/umod as otherwise we get
 * an exception from INT_MIN/-1 overflow similarly as with div
 * by zero.
 */
{
	"DIV32 overflow, check 1",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, -1),
	BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
	BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 0,
},
{
	"DIV32 overflow, check 2",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
	BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 0,
},
{
	"DIV64 overflow, check 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, -1),
	BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
	BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 1),
	BPF_MOV32_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 0,
},
{
	"DIV64 overflow, check 2",
	.insns = {
	BPF_LD_IMM64(BPF_REG_1, LLONG_MIN),
	BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, -1),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 1),
	BPF_MOV32_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 0,
},
{
	"MOD32 overflow, check 1",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, -1),
	BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
	BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = INT_MIN,
},
{
	"MOD32 overflow, check 2",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
	BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = INT_MIN,
},
{
	"MOD64 overflow, check 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, -1),
	BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
	BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
	BPF_MOV32_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	"MOD64 overflow, check 2",
	.insns = {
	BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
	BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
	BPF_MOV32_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
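Why these retvals come out as they do: BPF_DIV and BPF_MOD are unsigned, so INT_MIN / -1 cannot raise the overflow exception a signed native idiv would. A plain userspace C check of the unsigned interpretation (sketch):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* as u32, INT_MIN / -1 is 0x80000000 / 0xffffffff */
	uint32_t a = (uint32_t)INT_MIN, b = (uint32_t)-1;

	printf("%u\n", a / b);			/* 0, as in the DIV32 retvals */
	printf("%d\n", (int32_t)(a % b));	/* INT_MIN, as in the MOD32 retvals */
	return 0;
}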
@ -1,650 +0,0 @@
{
	"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_MOV64_IMM(BPF_REG_2, 16),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, bitwise AND, zero included",
	.insns = {
	/* set max stack size */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
	/* set r3 to a random value */
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	/* use bitwise AND to limit r3 range to [0, 64] */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
	 * For unpriv this should signal an error, because memory at &fp[-64] is
	 * not initialized.
	 */
	BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 4 },
	.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
	.result_unpriv = REJECT,
	/* in privileged mode reads from uninitialized stack locations are permitted */
	.result = ACCEPT,
},
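In C, the bounding pattern of these two tests looks roughly as follows (sketch; the map name, section, and buffer size are illustrative). The sketch zero-initializes the buffer, which is exactly what the unprivileged variant above omits:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

SEC("socket")
int var_len_output(void *ctx)
{
	char buf[64] = {};	/* initialized, so even unpriv mode is fine */
	__u64 n = bpf_get_prandom_u32();

	n &= 64;	/* as in the tests: n is now bounded by [0, 64] */
	bpf_ringbuf_output(&rb, buf, n, 0);	/* size may be zero */
	return 0;
}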
{
	"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid indirect access to stack R1 off=-64 size=65",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP, correct bounds",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_MOV64_IMM(BPF_REG_2, 16),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP (signed), correct bounds",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_MOV64_IMM(BPF_REG_2, 16),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP, bounds + offset",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid indirect access to stack R1 off=-64 size=65",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP, wrong max",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid indirect access to stack R1 off=-64 size=65",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP, no max check",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	/* because max wasn't checked, signed min is negative */
	.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
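The fix for the rejection above is an unsigned upper-bound test before the call; once the maximum is pinned to a small value, the signed minimum can no longer be negative. A fragment-level sketch (assumes the usual bpf_helpers.h environment; the function and parameter names are illustrative):

static int read_var_len(void *buf, const void *src, volatile __u64 *len_ptr)
{
	__u64 n = *len_ptr;	/* unknown value: signed minimum is negative */

	if (n > 64)		/* unsigned check: n is now in [0, 64] */
		return 0;
	return bpf_probe_read_kernel(buf, n, src);
}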
{
	"helper access to variable memory: stack, JMP, no min check",
	.insns = {
	/* set max stack size */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
	/* set r3 to a random value */
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	/* use JMP to limit r3 range to [0, 64] */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
	 * For unpriv this should signal an error, because memory at &fp[-64] is
	 * not initialized.
	 */
	BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 4 },
	.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
	.result_unpriv = REJECT,
	/* in privileged mode reads from uninitialized stack locations are permitted */
	.result = ACCEPT,
},
{
	"helper access to variable memory: stack, JMP (signed), no min check",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R2 min value is negative",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: map, JMP, correct bounds",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val), 4),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: map, JMP, wrong max",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.errstr = "invalid access to map value, value_size=48 off=0 size=49",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: map adjusted, JMP, correct bounds",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
	BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 20, 4),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: map adjusted, JMP, wrong max",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.errstr = "R1 min value is outside of the allowed memory range",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_EMIT_CALL(BPF_FUNC_csum_diff),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_EMIT_CALL(BPF_FUNC_csum_diff),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 type=scalar expected=fp",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_EMIT_CALL(BPF_FUNC_csum_diff),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_EMIT_CALL(BPF_FUNC_csum_diff),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_EMIT_CALL(BPF_FUNC_csum_diff),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_EMIT_CALL(BPF_FUNC_csum_diff),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_EMIT_CALL(BPF_FUNC_csum_diff),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = 0 /* csum_diff of 64-byte packet */,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 type=scalar expected=fp",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 type=scalar expected=fp",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: 8 bytes leak",
	.insns = {
	/* set max stack size */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
	/* set r3 to a random value */
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
	/* Note: fp[-32] left uninitialized */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	/* Limit r3 range to [1, 64] */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
	 * For unpriv this should signal an error, because memory region [1, 64]
	 * at &fp[-64] is not fully initialized.
	 */
	BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 3 },
	.errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64",
	.result_unpriv = REJECT,
	/* in privileged mode reads from uninitialized stack locations are permitted */
	.result = ACCEPT,
},
{
	"helper access to variable memory: 8 bytes no leak (init memory)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@ -1,460 +0,0 @@
{
	"helper access to packet: test1, valid packet_ptr range",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.result_unpriv = ACCEPT,
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"helper access to packet: test2, unchecked packet_ptr",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1 },
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"helper access to packet: test3, variable add",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
	BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 11 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"helper access to packet: test4, packet_ptr with bad range",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 7 },
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"helper access to packet: test5, packet_ptr with too short range",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 6 },
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_XDP,
},
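test1 above, in its usual C form (sketch; the map definition and names are illustrative): after the data_end check, the first 8 packet bytes are a verified memory region and can be handed to a map helper.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, __u64);
	__type(value, __u64);
} pkt_map SEC(".maps");

SEC("xdp")
int pkt_as_key(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 8 > data_end)	/* required before using 'data' */
		return XDP_PASS;
	/* packet bytes serve as both key and value, as in test1 */
	bpf_map_update_elem(&pkt_map, data, data, BPF_ANY);
	return XDP_PASS;
}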
{
	"helper access to packet: test6, cls valid packet_ptr range",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 5 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to packet: test7, cls unchecked packet_ptr",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1 },
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to packet: test8, cls variable add",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
	BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 11 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to packet: test9, cls packet_ptr with bad range",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 7 },
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to packet: test10, cls packet_ptr with too short range",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 6 },
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to packet: test11, cls unsuitable helper 1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_4, 42),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "helper access to the packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to packet: test12, cls unsuitable helper 2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_4, 4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "helper access to the packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to packet: test13, cls helper ok",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"helper access to packet: test14, cls helper ok sub",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data)),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data_end)),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
|
||||
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 4),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_4, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"helper access to packet: test15, cls helper fail sub",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data)),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data_end)),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
|
||||
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 4),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_4, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = REJECT,
|
||||
.errstr = "invalid access to packet",
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"helper access to packet: test16, cls helper fail range 1",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data)),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data_end)),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 8),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_4, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = REJECT,
|
||||
.errstr = "invalid access to packet",
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"helper access to packet: test17, cls helper fail range 2",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data)),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data_end)),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_MOV64_IMM(BPF_REG_2, -9),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_4, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = REJECT,
|
||||
.errstr = "R2 min value is negative",
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"helper access to packet: test18, cls helper fail range 3",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data)),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data_end)),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_MOV64_IMM(BPF_REG_2, ~0),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_4, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = REJECT,
|
||||
.errstr = "R2 min value is negative",
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"helper access to packet: test19, cls helper range zero",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data)),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data_end)),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_4, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"helper access to packet: test20, pkt end as input",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data)),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data_end)),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 4),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_4, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = REJECT,
|
||||
.errstr = "R1 type=pkt_end expected=fp",
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"helper access to packet: test21, wrong reg",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data)),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data_end)),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 4),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_4, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = REJECT,
|
||||
.errstr = "invalid access to packet",
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
@ -1,196 +0,0 @@
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
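Every test in this file zeroes a stack slot with BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), which is why the cover letter mentions the automatic BPF_ST_MEM to BPF_STX_MEM rewrite to overcome current clang limitations: the migrated form stores via a scratch register instead. A hedged sketch of the KPROBE spin_lock case, reusing the includes and license from the sketch above and assuming an illustrative map layout (the generated progs/verifier_helper_restricted.c may differ):

struct val {
	int cnt;
	struct bpf_spin_lock l;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct val);
} map_spin_lock SEC(".maps");

SEC("kprobe")
__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE")
__failure __msg("tracing progs cannot use bpf_spin_lock yet")
__naked void spin_lock_forbidden_in_kprobe(void)
{
	/* r0 = 0 plus a BPF_STX store replaces the original BPF_ST_MEM */
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r0 = 0;						\
	*(u64*)(r2 + 0) = r0;				\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	call %[bpf_spin_lock];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}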
@ -1,953 +0,0 @@
{
	"helper access to map: full range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: partial range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: empty range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=0 size=0",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: out-of-bound range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=0 size=56",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: negative range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_2, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R2 min value is negative",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): full range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
	BPF_MOV64_IMM(BPF_REG_2,
		      sizeof(struct test_val) - offsetof(struct test_val, foo)),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): partial range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): empty range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=4 size=0",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): out-of-bound range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
	BPF_MOV64_IMM(BPF_REG_2,
		      sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=4 size=52",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): negative range (> adjustment)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
	BPF_MOV64_IMM(BPF_REG_2, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R2 min value is negative",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): negative range (< adjustment)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R2 min value is negative",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): full range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2,
		      sizeof(struct test_val) - offsetof(struct test_val, foo)),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): partial range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): empty range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R1 min value is outside of the allowed memory range",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): out-of-bound range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2,
		      sizeof(struct test_val) -
		      offsetof(struct test_val, foo) + 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=4 size=52",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): negative range (> adjustment)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R2 min value is negative",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): negative range (< adjustment)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R2 min value is negative",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via variable): full range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2,
		      sizeof(struct test_val) - offsetof(struct test_val, foo)),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via variable): partial range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via variable): empty range",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_EMIT_CALL(BPF_FUNC_trace_printk),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R1 min value is outside of the allowed memory range",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via variable): no max check",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R1 unbounded memory access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via variable): wrong max check",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_MOV64_IMM(BPF_REG_2,
		      sizeof(struct test_val) -
		      offsetof(struct test_val, foo) + 1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=4 size=45",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using <, good access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using <, bad access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = REJECT,
	.errstr = "R1 unbounded memory access",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using <=, good access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using <=, bad access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = REJECT,
	.errstr = "R1 unbounded memory access",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using s<, good access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using s<, good access 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using s<, bad access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = REJECT,
	.errstr = "R1 min value is negative",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using s<=, good access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using s<=, good access 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to map: bounds check using s<=, bad access",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.result = REJECT,
	.errstr = "R1 min value is negative",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map lookup helper access to map",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 8 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map update helper access to map",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 10 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map update helper access to map: wrong size",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.fixup_map_hash_16b = { 10 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=0 size=16",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map helper access to adjusted map (via const imm)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, offsetof(struct other_val, bar)),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 9 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map helper access to adjusted map (via const imm): out-of-bound 1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct other_val) - 4),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 9 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=16 off=12 size=8",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map helper access to adjusted map (via const imm): out-of-bound 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 9 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map helper access to adjusted map (via const reg)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, offsetof(struct other_val, bar)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 10 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map helper access to adjusted map (via const reg): out-of-bound 1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct other_val) - 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 10 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=16 off=12 size=8",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map helper access to adjusted map (via const reg): out-of-bound 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, -4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 10 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map helper access to adjusted map (via variable)",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar), 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 11 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map helper access to adjusted map (via variable): no max check",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 10 },
	.result = REJECT,
	.errstr = "R2 unbounded memory access, make sure to bounds check any such access",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"map helper access to adjusted map (via variable): wrong max check",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar) + 1, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 3, 11 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=16 off=9 size=8",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -1,161 +0,0 @@
{
	"ARG_PTR_TO_LONG uninitialized",
	.insns = {
	/* bpf_strtoul arg1 (buf) */
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),

	/* bpf_strtoul arg2 (buf_len) */
	BPF_MOV64_IMM(BPF_REG_2, 4),

	/* bpf_strtoul arg3 (flags) */
	BPF_MOV64_IMM(BPF_REG_3, 0),

	/* bpf_strtoul arg4 (res) */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),

	/* bpf_strtoul() */
	BPF_EMIT_CALL(BPF_FUNC_strtoul),

	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
	.errstr = "invalid indirect read from stack R4 off -16+0 size 8",
},
{
	"ARG_PTR_TO_LONG half-uninitialized",
	.insns = {
	/* bpf_strtoul arg1 (buf) */
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),

	/* bpf_strtoul arg2 (buf_len) */
	BPF_MOV64_IMM(BPF_REG_2, 4),

	/* bpf_strtoul arg3 (flags) */
	BPF_MOV64_IMM(BPF_REG_3, 0),

	/* bpf_strtoul arg4 (res) */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
	BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),

	/* bpf_strtoul() */
	BPF_EMIT_CALL(BPF_FUNC_strtoul),

	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result_unpriv = REJECT,
	.errstr_unpriv = "invalid indirect read from stack R4 off -16+4 size 8",
	/* in privileged mode reads from uninitialized stack locations are permitted */
	.result = ACCEPT,
},
{
	"ARG_PTR_TO_LONG misaligned",
	.insns = {
	/* bpf_strtoul arg1 (buf) */
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),

	/* bpf_strtoul arg2 (buf_len) */
	BPF_MOV64_IMM(BPF_REG_2, 4),

	/* bpf_strtoul arg3 (flags) */
	BPF_MOV64_IMM(BPF_REG_3, 0),

	/* bpf_strtoul arg4 (res) */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -12),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),

	/* bpf_strtoul() */
	BPF_EMIT_CALL(BPF_FUNC_strtoul),

	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
	.errstr = "misaligned stack access off (0x0; 0x0)+-20+0 size 8",
},
{
	"ARG_PTR_TO_LONG size < sizeof(long)",
	.insns = {
	/* bpf_strtoul arg1 (buf) */
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -16),
	BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),

	/* bpf_strtoul arg2 (buf_len) */
	BPF_MOV64_IMM(BPF_REG_2, 4),

	/* bpf_strtoul arg3 (flags) */
	BPF_MOV64_IMM(BPF_REG_3, 0),

	/* bpf_strtoul arg4 (res) */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 12),
	BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),

	/* bpf_strtoul() */
	BPF_EMIT_CALL(BPF_FUNC_strtoul),

	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
	.errstr = "invalid indirect access to stack R4 off=-4 size=8",
},
{
	"ARG_PTR_TO_LONG initialized",
	.insns = {
	/* bpf_strtoul arg1 (buf) */
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),

	/* bpf_strtoul arg2 (buf_len) */
	BPF_MOV64_IMM(BPF_REG_2, 4),

	/* bpf_strtoul arg3 (flags) */
	BPF_MOV64_IMM(BPF_REG_3, 0),

	/* bpf_strtoul arg4 (res) */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),

	/* bpf_strtoul() */
	BPF_EMIT_CALL(BPF_FUNC_strtoul),

	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
},
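
/* A minimal BPF-C sketch (not part of the original diff) of the contract
 * the ARG_PTR_TO_LONG tests above encode: the "res" argument of
 * bpf_strtoul() must point to an aligned 8-byte stack slot, and the
 * verifier rejects slots that are uninitialized (for unprivileged loads),
 * misaligned, or smaller than sizeof(long). Program and function names
 * here are hypothetical.
 *
 * #include <linux/bpf.h>
 * #include <bpf/bpf_helpers.h>
 *
 * SEC("cgroup/sysctl")
 * int arg_ptr_to_long_ok(struct bpf_sysctl *ctx)
 * {
 *	unsigned long res = 0;	// aligned, fully initialized 8-byte slot
 *	char buf[4] = "600";	// same "600\0" bytes the tests store
 *
 *	bpf_strtoul(buf, sizeof(buf), 0, &res);
 *	return 1;
 * }
 */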
@@ -1,72 +0,0 @@
{
	"ld_ind: check calling conv, r1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr = "R1 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr = "R2 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
	BPF_EXIT_INSN(),
	},
	.errstr = "R3 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_IMM(BPF_REG_4, 1),
	BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
	BPF_EXIT_INSN(),
	},
	.errstr = "R4 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r5",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.errstr = "R5 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r7",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_IMM(BPF_REG_7, 1),
	BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 1,
},
@@ -1,67 +0,0 @@
{
	"leak pointer into ctx 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2,
		      offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 2 },
	.errstr_unpriv = "R2 leaks addr into mem",
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
},
{
	"leak pointer into ctx 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_10,
		      offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R10 leaks addr into mem",
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
},
{
	"leak pointer into ctx 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1 },
	.errstr_unpriv = "R2 leaks addr into ctx",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"leak pointer into map val",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_6, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr_unpriv = "R6 leaks addr into mem",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
@@ -1,99 +0,0 @@
{
	"bpf_map_ptr: read with negative offset rejected",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 1 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN",
	.result = REJECT,
	.errstr = "R1 is bpf_array invalid negative access: off=-8",
},
{
	"bpf_map_ptr: write rejected",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 3 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN",
	.result = REJECT,
	.errstr = "only read from bpf_array is supported",
},
{
	"bpf_map_ptr: read non-existent field rejected",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_6, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 1),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 1 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN",
	.result = REJECT,
	.errstr = "cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"bpf_map_ptr: read ops field accepted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_6, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 1 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN",
	.result = ACCEPT,
	.retval = 1,
},
{
	"bpf_map_ptr: r = 0, map_ptr = map_ptr + r",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 4 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
	.result = ACCEPT,
},
{
	"bpf_map_ptr: r = 0, r = r + map_ptr",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_LD_MAP_FD(BPF_REG_0, 0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_16b = { 4 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 has pointer with unsupported alu operation",
	.result = ACCEPT,
},
@@ -1,65 +0,0 @@
{
	"invalid map_fd for function call",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
	BPF_EXIT_INSN(),
	},
	.errstr = "fd 0 is not pointing to valid bpf_map",
	.result = REJECT,
},
{
	"don't check return value before access",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "R0 invalid mem access 'map_value_or_null'",
	.result = REJECT,
},
{
	"access memory with incorrect alignment",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "misaligned value access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"sometimes access memory with incorrect alignment",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "R0 invalid mem access",
	.errstr_unpriv = "R0 leaks addr",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
@@ -1,322 +0,0 @@
{
	"masking, test out of bounds 1",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 5),
	BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 2",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 1),
	BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 3",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
	BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 4",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
	BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 5",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, -1),
	BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 6",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, -1),
	BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 7",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 5),
	BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 8",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 9",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
	BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 10",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
	BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 11",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, -1),
	BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test out of bounds 12",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, -1),
	BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test in bounds 1",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 4),
	BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 4,
},
{
	"masking, test in bounds 2",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test in bounds 3",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
	BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0xfffffffe,
},
{
	"masking, test in bounds 4",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
	BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0xabcde,
},
{
	"masking, test in bounds 5",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
{
	"masking, test in bounds 6",
	.insns = {
	BPF_MOV32_IMM(BPF_REG_1, 46),
	BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 46,
},
{
	"masking, test in bounds 7",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, -46),
	BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
	BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 46,
},
{
	"masking, test in bounds 8",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, -47),
	BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
	BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
	BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
	BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
},
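
/* A minimal C sketch (not part of the original diff) of the branchless
 * masking idiom the tests above exercise. The mask is all-ones only when
 * 0 <= index <= limit - 1, so the AND clamps any out-of-bounds index to 0,
 * matching the expected retvals. Assumes 64-bit long and an arithmetic
 * right shift of signed values, as on the BPF ALU64 ARSH instruction.
 *
 * static long mask_index(long index, long limit)
 * {
 *	long m = (limit - 1) - index;	// negative iff index > limit - 1
 *
 *	m |= index;			// also negative iff index < 0
 *	m = -m;				// in bounds: m <= 0; out: m > 0
 *	m >>= 63;			// all-ones if negative, else 0
 *	return index & m;		// index preserved only in bounds
 * }
 */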
@@ -1,235 +0,0 @@
{
	"meta access, test1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test2",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet, off=-8",
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test3",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test5",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
	BPF_MOV64_IMM(BPF_REG_2, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_meta),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R3 !read_ok",
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test7",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test8",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test9",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test10",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_IMM(BPF_REG_5, 42),
	BPF_MOV64_IMM(BPF_REG_6, 24),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test11",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_MOV64_IMM(BPF_REG_5, 42),
	BPF_MOV64_IMM(BPF_REG_6, 24),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"meta access, test12",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, data_meta)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
	BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
	BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
@@ -1,305 +0,0 @@
{
	"raw_stack: no skb_load_bytes",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	/* Call to skb_load_bytes() omitted. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid read from stack R6 off=-8 size=8",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, negative len",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R4 min value is negative",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, negative len 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, ~0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R4 min value is negative",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, zero len",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid zero-sized read",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, no init",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, init",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, spilled regs around bounds",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
		    offsetof(struct __sk_buff, priority)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, spilled regs corruption",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"raw_stack: skb_load_bytes, spilled regs corruption 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
		    offsetof(struct __sk_buff, priority)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R3 invalid mem access 'scalar'",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"raw_stack: skb_load_bytes, spilled regs + data",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
		    offsetof(struct __sk_buff, priority)),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, invalid access 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid indirect access to stack R3 off=-513 size=8",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, invalid access 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid indirect access to stack R3 off=-1 size=8",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, invalid access 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R4 min value is negative",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, invalid access 4",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, invalid access 5",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, invalid access 6",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid zero-sized read",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"raw_stack: skb_load_bytes, large access",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_4, 512),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -1,35 +0,0 @@
{
	"raw_tracepoint_writable: reject variable offset",
	.insns = {
	/* r6 is our tp buffer */
	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),

	BPF_LD_MAP_FD(BPF_REG_1, 0),
	/* move the key (== 0) to r10-8 */
	BPF_MOV32_IMM(BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
	/* lookup in the map */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),

	/* exit clean if null */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	/* shift the buffer pointer to a variable location */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_0),
	/* clobber whatever's there */
	BPF_MOV64_IMM(BPF_REG_7, 4242),
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, 0),

	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1, },
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
	.errstr = "R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@@ -1,95 +0,0 @@
{
	"ringbuf: invalid reservation offset 1",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	/* spill R6(mem) into the stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	/* fill it back in R7 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
	/* should be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* add invalid offset to reserved ringbuf memory */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xcafe),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = REJECT,
	.errstr = "R1 must have zero offset when passed to release func",
},
{
	"ringbuf: invalid reservation offset 2",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	/* spill R6(mem) into the stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	/* fill it back in R7 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
	/* add invalid offset to reserved ringbuf memory */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 0xcafe),
	/* should be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = REJECT,
	.errstr = "R7 min value is outside of the allowed memory range",
},
{
	"ringbuf: check passing rb mem to helpers",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	/* pass allocated ring buffer memory to fib lookup */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, 8),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_fib_lookup),
	/* submit the ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 2 },
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
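
/* A BPF-C sketch (an assumption, not taken from this diff) of the
 * reserve/submit pairing the ringbuf tests above validate: the pointer
 * returned by bpf_ringbuf_reserve() must reach bpf_ringbuf_submit() (or
 * bpf_ringbuf_discard()) with zero offset. The map name "rb" and the
 * program name are hypothetical.
 *
 * #include <linux/bpf.h>
 * #include <bpf/bpf_helpers.h>
 *
 * struct {
 *	__uint(type, BPF_MAP_TYPE_RINGBUF);
 *	__uint(max_entries, 4096);
 * } rb SEC(".maps");
 *
 * SEC("xdp")
 * int ringbuf_reserve_submit(struct xdp_md *ctx)
 * {
 *	__u64 *rec = bpf_ringbuf_reserve(&rb, sizeof(*rec), 0);
 *
 *	if (!rec)
 *		return XDP_PASS;
 *	*rec = 42;
 *	// Must be the unmodified pointer: an added offset is rejected.
 *	bpf_ringbuf_submit(rec, 0);
 *	return XDP_PASS;
 * }
 */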
@ -1,345 +0,0 @@
|
||||
{
|
||||
"check valid spill/fill",
|
||||
.insns = {
|
||||
/* spill R1(ctx) into stack */
|
||||
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
|
||||
/* fill it back into R2 */
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
|
||||
/* should be able to access R0 = *(R2 + 8) */
|
||||
/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
|
||||
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.errstr_unpriv = "R0 leaks addr",
|
||||
.result = ACCEPT,
|
||||
.result_unpriv = REJECT,
|
||||
.retval = POINTER_VALUE,
|
||||
},
|
||||
{
|
||||
"check valid spill/fill, skb mark",
|
||||
.insns = {
|
||||
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
|
||||
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
|
||||
offsetof(struct __sk_buff, mark)),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = ACCEPT,
|
||||
.result_unpriv = ACCEPT,
|
||||
},
|
||||
{
|
||||
"check valid spill/fill, ptr to mem",
|
||||
.insns = {
|
||||
/* reserve 8 byte ringbuf memory */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
||||
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 8),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
|
||||
/* store a pointer to the reserved memory in R6 */
|
||||
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
|
||||
/* check whether the reservation was successful */
|
||||
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
|
||||
/* spill R6(mem) into the stack */
|
||||
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
|
||||
/* fill it back in R7 */
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
|
||||
/* should be able to access *(R7) = 0 */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
|
||||
/* submit the reserved ringbuf memory */
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.fixup_map_ringbuf = { 1 },
|
||||
.result = ACCEPT,
|
||||
.result_unpriv = ACCEPT,
|
||||
},
|
||||
{
|
||||
"check with invalid reg offset 0",
|
||||
.insns = {
|
||||
/* reserve 8 byte ringbuf memory */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
||||
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 8),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
|
||||
/* store a pointer to the reserved memory in R6 */
|
||||
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
|
||||
/* add invalid offset to memory or NULL */
|
||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
|
||||
/* check whether the reservation was successful */
|
||||
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
|
||||
/* should not be able to access *(R7) = 0 */
|
||||
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0),
|
||||
/* submit the reserved ringbuf memory */
|
||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
|
||||
BPF_MOV64_IMM(BPF_REG_2, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.fixup_map_ringbuf = { 1 },
|
||||
.result = REJECT,
|
||||
.errstr = "R0 pointer arithmetic on ringbuf_mem_or_null prohibited",
|
||||
},
|
||||
{
|
||||
"check corrupted spill/fill",
|
||||
.insns = {
|
||||
/* spill R1(ctx) into stack */
|
||||
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
|
||||
/* mess up with R1 pointer on stack */
|
||||
BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
|
||||
/* fill back into R0 is fine for priv.
|
||||
* R0 now becomes SCALAR_VALUE.
|
||||
*/
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
|
||||
/* Load from R0 should fail. */
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.errstr_unpriv = "attempt to corrupt spilled",
|
||||
.errstr = "R0 invalid mem access 'scalar'",
|
||||
.result = REJECT,
|
||||
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
|
||||
},
|
||||
{
|
||||
"check corrupted spill/fill, LSB",
|
||||
.insns = {
|
||||
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
|
||||
BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.errstr_unpriv = "attempt to corrupt spilled",
|
||||
.result_unpriv = REJECT,
|
||||
.result = ACCEPT,
|
||||
.retval = POINTER_VALUE,
|
||||
},
|
||||
{
|
||||
"check corrupted spill/fill, MSB",
|
||||
.insns = {
|
||||
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
|
||||
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.errstr_unpriv = "attempt to corrupt spilled",
|
||||
.result_unpriv = REJECT,
|
||||
.result = ACCEPT,
|
||||
.retval = POINTER_VALUE,
|
||||
},
|
||||
{
|
||||
"Spill and refill a u32 const scalar. Offset to skb->data",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data)),
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, data_end)),
|
||||
/* r4 = 20 */
|
||||
BPF_MOV32_IMM(BPF_REG_4, 20),
|
||||
/* *(u32 *)(r10 -8) = r4 */
|
||||
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
|
||||
/* r4 = *(u32 *)(r10 -8) */
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
|
||||
/* r0 = r2 */
|
||||
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
|
||||
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */
|
||||
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
|
||||
/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */
|
||||
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
|
||||
/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
{
	"Spill a u32 const, refill from another half of the uninit u32 from the stack",
	.insns = {
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result_unpriv = REJECT,
	.errstr_unpriv = "invalid read from stack off -4+0 size 4",
	/* in privileged mode reads from uninitialized stack locations are permitted */
	.result = ACCEPT,
},
{
	"Spill a u32 const scalar. Refill as u16. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -8) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill u32 const scalars. Refill as u64. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r6 = 0 */
	BPF_MOV32_IMM(BPF_REG_6, 0),
	/* r7 = 20 */
	BPF_MOV32_IMM(BPF_REG_7, 20),
	/* *(u32 *)(r10 -4) = r6 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
	/* *(u32 *)(r10 -8) = r7 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
	/* r4 = *(u16 *)(r10 -8) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -6) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* r4 = *(u32 *)(r10 -4) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill and refill a umax=40 bounded scalar. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* *(u32 *)(r10 -8) = r4 R4=umax=40 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 - 8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r2 += r4 R2=pkt R4=umax=40 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
	/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill a u32 scalar at fp-4 and then at fp-8",
	.insns = {
	/* r4 = 4321 */
	BPF_MOV32_IMM(BPF_REG_4, 4321),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@ -1,359 +0,0 @@
{
	"PTR_TO_STACK store/load",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0xfaceb00c,
},
{
	"PTR_TO_STACK store/load - bad alignment on off",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
},
{
	"PTR_TO_STACK store/load - bad alignment on reg",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
},
{
	"PTR_TO_STACK store/load - out of bounds low",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid write to stack R1 off=-79992 size=8",
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
},
{
	"PTR_TO_STACK store/load - out of bounds high",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid write to stack R1 off=0 size=8",
},
{
	"PTR_TO_STACK check high 1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
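Note the BPF_ST_MEM (store of an immediate) instructions throughout these tests: clang currently cannot emit BPF_ST_MEM from inline assembly, so the migration rewrites each one as a register move followed by BPF_STX_MEM. A hedged sketch of "PTR_TO_STACK check high 1" after such a rewrite (the function name and SEC() choice are assumptions, not the generated code):

SEC("socket")
__description("PTR_TO_STACK check high 1")
__success __retval(42)
__naked void ptr_to_stack_check_high_1(void)
{
	asm volatile ("					\
	r1 = r10;					\
	r1 += -1;					\
	r0 = 42;					\
	*(u8*)(r1 + 0) = r0;				\
	r0 = *(u8*)(r1 + 0);				\
	exit;						\
"	::: __clobber_all);
}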
{
	"PTR_TO_STACK check high 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"PTR_TO_STACK check high 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
	BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = 42,
},
{
	"PTR_TO_STACK check high 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.errstr = "invalid write to stack R1 off=0 size=1",
	.result = REJECT,
},
{
	"PTR_TO_STACK check high 5",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.errstr = "invalid write to stack R1",
},
{
	"PTR_TO_STACK check high 6",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
	BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.errstr = "invalid write to stack",
},
{
	"PTR_TO_STACK check high 7",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
	BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.errstr = "fp pointer offset",
},
{
	"PTR_TO_STACK check low 1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"PTR_TO_STACK check low 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
	BPF_EXIT_INSN(),
	},
	.result_unpriv = REJECT,
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.result = ACCEPT,
	.retval = 42,
},
{
	"PTR_TO_STACK check low 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.errstr = "invalid write to stack R1 off=-513 size=1",
	.result = REJECT,
},
{
	"PTR_TO_STACK check low 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "math between fp pointer",
},
{
	"PTR_TO_STACK check low 5",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.errstr = "invalid write to stack",
},
{
	"PTR_TO_STACK check low 6",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
	BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid write to stack",
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
},
{
	"PTR_TO_STACK check low 7",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
	BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	.errstr = "fp pointer offset",
},
{
	"PTR_TO_STACK mixed reg/k, 1",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
	BPF_MOV64_IMM(BPF_REG_2, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"PTR_TO_STACK mixed reg/k, 2",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
	BPF_MOV64_IMM(BPF_REG_2, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"PTR_TO_STACK mixed reg/k, 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
	BPF_MOV64_IMM(BPF_REG_2, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = -3,
},
{
	"PTR_TO_STACK reg",
	.insns = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_MOV64_IMM(BPF_REG_2, -3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
	BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 42,
},
{
	"stack pointer arithmetic",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 4),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
	BPF_ST_MEM(0, BPF_REG_2, 4, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
	BPF_ST_MEM(0, BPF_REG_2, 4, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"store PTR_TO_STACK in R10 to array map using BPF_B",
	.insns = {
	/* Load pointer to map. */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	/* Copy R10 to R9. */
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_10),
	/* Pollute other registers with unaligned values. */
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_MOV64_IMM(BPF_REG_3, -1),
	BPF_MOV64_IMM(BPF_REG_4, -1),
	BPF_MOV64_IMM(BPF_REG_5, -1),
	BPF_MOV64_IMM(BPF_REG_6, -1),
	BPF_MOV64_IMM(BPF_REG_7, -1),
	BPF_MOV64_IMM(BPF_REG_8, -1),
	/* Store both R9 and R10 with BPF_B and read back. */
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_1, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_9, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_1, 0),
	/* Should read back as same value. */
	BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 3 },
	.result = ACCEPT,
	.retval = 42,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@ -1,39 +0,0 @@
{
	"read uninitialized register",
	.insns = {
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr = "R2 !read_ok",
	.result = REJECT,
},
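In migrated form this reduces to a two-instruction __naked function; the sketch below is an illustration under assumed naming conventions, not the tool's exact output:

SEC("socket")
__description("read uninitialized register")
__failure __msg("R2 !read_ok")
__naked void read_uninitialized_register(void)
{
	asm volatile ("					\
	r0 = r2;					\
	exit;						\
"	::: __clobber_all);
}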
{
	"read invalid register",
	.insns = {
	BPF_MOV64_REG(BPF_REG_0, -1),
	BPF_EXIT_INSN(),
	},
	.errstr = "R15 is invalid",
	.result = REJECT,
},
{
	"program doesn't init R0 before exit",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
	BPF_EXIT_INSN(),
	},
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
{
	"program doesn't init R0 before exit in all branches",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "R0 !read_ok",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
@ -1,104 +0,0 @@
{
	"map element value store of cleared call register",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R1 !read_ok",
	.errstr = "R1 !read_ok",
	.result = REJECT,
	.result_unpriv = REJECT,
},
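The .fixup_map_hash_48b entry above patches a map fd into instruction 3 at load time. A migrated test instead declares the map in the C file and references it via __imm_addr. The sketch below assumes a struct test_val laid out as in the old harness and an illustrative function name:

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

SEC("socket")
__description("map element value store of cleared call register")
__failure __msg("R1 !read_ok")
__naked void store_of_cleared_call_register(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	*(u64*)(r0 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}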
{
	"map element value with unaligned store",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
	BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
	BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
	BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
	BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
	BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
	BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"map element value with unaligned load",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"map element value is preserved across register spilling",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, offsetof(struct test_val, foo)),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@ -1,43 +0,0 @@
{
	"map element value is preserved across register spilling",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
},
{
	"map element value or null is marked on register spilling",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
},
@ -1,220 +0,0 @@
{
	"multiple registers share map_lookup_elem result",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
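A hedged sketch of this first test in migrated form, with SEC("tc") inferred from BPF_PROG_TYPE_SCHED_CLS and a map declaration standing in for the fixup (the 8-byte value type and function name are assumptions):

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("tc")
__description("multiple registers share map_lookup_elem result")
__success
__naked void share_map_lookup_elem_result(void)
{
	asm volatile ("					\
	r1 = 10;					\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r4 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r1 = 0;						\
	*(u64*)(r4 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}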
{
	"alu ops on ptr_to_map_value_or_null, 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr = "R4 pointer arithmetic on map_value_or_null",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
{
	"alu ops on ptr_to_map_value_or_null, 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr = "R4 pointer arithmetic on map_value_or_null",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
{
	"alu ops on ptr_to_map_value_or_null, 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr = "R4 pointer arithmetic on map_value_or_null",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
{
	"invalid memory access with multiple map_lookup_elem calls",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.result = REJECT,
	.errstr = "R4 !read_ok",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
{
	"valid indirect map_lookup_elem access with 2nd lookup in branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_2, 10),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
{
	"invalid map access from else condition",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr = "R0 unbounded memory access",
	.result = REJECT,
	.errstr_unpriv = "R0 leaks addr",
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"map lookup and null branch prediction",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_1, 10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_10, 10),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"MAP_VALUE_OR_NULL check_ids() in regsafe()",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	/* r9 = map_lookup_elem(...) */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1,
		      0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
	/* r8 = map_lookup_elem(...) */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1,
		      0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	/* r7 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	/* r6 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* if r6 > r7 goto +1 ; no new information about the state is derived from
	 *                    ; this check, thus produced verifier states differ
	 *                    ; only in 'insn_idx'
	 * r9 = r8            ; optionally share ID between r9 and r8
	 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
	/* if r9 == 0 goto <exit> */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1),
	/* read map value via r8, this is not always
	 * safe because r8 might be not equal to r9.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0),
	/* exit 0 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.flags = BPF_F_TEST_STATE_FREQ,
	.fixup_map_hash_8b = { 3, 9 },
	.result = REJECT,
	.errstr = "R8 invalid mem access 'map_value_or_null'",
	.result_unpriv = REJECT,
	.errstr_unpriv = "",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
@ -1,291 +0,0 @@
{
	"variable-offset ctx access",
	.insns = {
	/* Get an unknown value */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
	/* Make it small and 4-byte aligned */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
	/* add it to skb. We now have either &skb->len or
	 * &skb->pkt_type, but we don't know which
	 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
	/* dereference it */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "variable ctx access var_off=(0x0; 0x4)",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
	"variable-offset stack read, priv vs unpriv",
	.insns = {
	/* Fill the top 8 bytes of the stack */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	/* Get an unknown value */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
	/* Make it small and 4-byte aligned */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
	/* add it to fp. We now have either fp-4 or fp-8, but
	 * we don't know which
	 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
	/* dereference it for a stack read */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R2 variable stack access prohibited for !root",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
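A sketch of the same priv/unpriv split in migrated form, using the __success plus __failure_unpriv/__msg_unpriv annotations supported by test_loader (section choice and function name are assumptions):

SEC("cgroup/skb")
__description("variable-offset stack read, priv vs unpriv")
__success __failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__naked void stack_read_priv_vs_unpriv(void)
{
	asm volatile ("					\
	r0 = 0;						\
	*(u64*)(r10 - 8) = r0;				\
	r2 = *(u32*)(r1 + 0);				\
	r2 &= 4;					\
	r2 -= 8;					\
	r2 += r10;					\
	r0 = *(u32*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}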
{
	"variable-offset stack read, uninitialized",
	.insns = {
	/* Get an unknown value */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
	/* Make it small and 4-byte aligned */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
	/* add it to fp. We now have either fp-4 or fp-8, but
	 * we don't know which
	 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
	/* dereference it for a stack read */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid variable-offset read from stack R2",
	.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
	"variable-offset stack write, priv vs unpriv",
	.insns = {
	/* Get an unknown value */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
	/* Make it small and 8-byte aligned */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
	/* Add it to fp. We now have either fp-8 or fp-16, but
	 * we don't know which
	 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
	/* Dereference it for a stack write */
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	/* Now read from the address we just wrote. This shows
	 * that, after a variable-offset write, a privileged
	 * program can read the slots that were in the range of
	 * that write (even if the verifier doesn't actually know
	 * if the slot being read was really written to or not).
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	/* Variable stack access is rejected for unprivileged.
	 */
	.errstr_unpriv = "R2 variable stack access prohibited for !root",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"variable-offset stack write clobbers spilled regs",
	.insns = {
	/* Dummy instruction; needed because we need to patch the next one
	 * and we can't patch the first instruction.
	 */
	BPF_MOV64_IMM(BPF_REG_6, 0),
	/* Make R0 a map ptr */
	BPF_LD_MAP_FD(BPF_REG_0, 0),
	/* Get an unknown value */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
	/* Make it small and 8-byte aligned */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
	BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
	/* Add it to fp. We now have either fp-8 or fp-16, but
	 * we don't know which.
	 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
	/* Spill R0(map ptr) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	/* Dereference the unknown value for a stack write */
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	/* Fill the register back into R2 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
	/* Try to dereference R2 for a memory load */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1 },
	/* The unprivileged case is not too interesting; variable
	 * stack access is rejected.
	 */
	.errstr_unpriv = "R2 variable stack access prohibited for !root",
	.result_unpriv = REJECT,
	/* In the privileged case, dereferencing a spilled-and-then-filled
	 * register is rejected because the previous variable offset stack
	 * write might have overwritten the spilled pointer (i.e. we lose track
	 * of the spilled register when we analyze the write).
	 */
.errstr = "R2 invalid mem access 'scalar'",
|
||||
.result = REJECT,
|
||||
},
|
||||
{
|
||||
"indirect variable-offset stack access, unbounded",
|
||||
.insns = {
|
||||
BPF_MOV64_IMM(BPF_REG_2, 6),
|
||||
BPF_MOV64_IMM(BPF_REG_3, 28),
|
||||
/* Fill the top 16 bytes of the stack. */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
||||
/* Get an unknown value. */
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, offsetof(struct bpf_sock_ops,
|
||||
bytes_received)),
|
||||
/* Check the lower bound but don't check the upper one. */
|
||||
BPF_JMP_IMM(BPF_JSLT, BPF_REG_4, 0, 4),
|
||||
/* Point the lower bound to initialized stack. Offset is now in range
|
||||
* from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.
|
||||
*/
|
||||
BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
|
||||
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
|
||||
BPF_MOV64_IMM(BPF_REG_5, 8),
|
||||
/* Dereference it indirectly. */
|
||||
BPF_EMIT_CALL(BPF_FUNC_getsockopt),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.errstr = "invalid unbounded variable-offset indirect access to stack R4",
|
||||
.result = REJECT,
|
||||
.prog_type = BPF_PROG_TYPE_SOCK_OPS,
|
||||
},
|
||||
{
|
||||
"indirect variable-offset stack access, max out of bound",
|
||||
.insns = {
|
||||
/* Fill the top 8 bytes of the stack */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
||||
/* Get an unknown value */
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
|
||||
/* Make it small and 4-byte aligned */
|
||||
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
|
||||
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
|
||||
/* add it to fp. We now have either fp-4 or fp-8, but
|
||||
* we don't know which
|
||||
*/
|
||||
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
|
||||
/* dereference it indirectly */
|
||||
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.fixup_map_hash_8b = { 5 },
|
||||
.errstr = "invalid variable-offset indirect access to stack R2",
|
||||
.result = REJECT,
|
||||
.prog_type = BPF_PROG_TYPE_LWT_IN,
|
||||
},
|
||||
{
|
||||
"indirect variable-offset stack access, min out of bound",
|
||||
.insns = {
|
||||
/* Fill the top 8 bytes of the stack */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
||||
/* Get an unknown value */
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
|
||||
/* Make it small and 4-byte aligned */
|
||||
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
|
||||
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 516),
|
||||
/* add it to fp. We now have either fp-516 or fp-512, but
|
||||
* we don't know which
|
||||
*/
|
||||
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
|
||||
/* dereference it indirectly */
|
||||
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.fixup_map_hash_8b = { 5 },
|
||||
.errstr = "invalid variable-offset indirect access to stack R2",
|
||||
.result = REJECT,
|
||||
.prog_type = BPF_PROG_TYPE_LWT_IN,
|
||||
},
|
||||
{
|
||||
"indirect variable-offset stack access, min_off < min_initialized",
|
||||
.insns = {
|
||||
/* Fill only the top 8 bytes of the stack. */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
||||
/* Get an unknown value */
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
|
||||
/* Make it small and 4-byte aligned. */
|
||||
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
|
||||
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
|
||||
/* Add it to fp. We now have either fp-12 or fp-16, but we don't know
|
||||
* which. fp-16 size 8 is partially uninitialized stack.
|
||||
*/
|
||||
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
|
||||
/* Dereference it indirectly. */
|
||||
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
||||
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.fixup_map_hash_8b = { 5 },
|
||||
.errstr = "invalid indirect read from stack R2 var_off",
|
||||
.result = REJECT,
|
||||
.prog_type = BPF_PROG_TYPE_LWT_IN,
|
||||
},
|
||||
{
|
||||
"indirect variable-offset stack access, priv vs unpriv",
|
||||
.insns = {
|
||||
/* Fill the top 16 bytes of the stack. */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
||||
/* Get an unknown value. */
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
|
||||
/* Make it small and 4-byte aligned. */
|
||||
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
|
||||
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
|
||||
/* Add it to fp. We now have either fp-12 or fp-16, we don't know
|
||||
* which, but either way it points to initialized stack.
|
||||
*/
|
||||
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
|
||||
/* Dereference it indirectly. */
|
||||
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
||||
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.fixup_map_hash_8b = { 6 },
|
||||
.errstr_unpriv = "R2 variable stack access prohibited for !root",
|
||||
.result_unpriv = REJECT,
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
|
||||
},
|
||||
{
|
||||
"indirect variable-offset stack access, ok",
|
||||
.insns = {
|
||||
/* Fill the top 16 bytes of the stack. */
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
|
||||
/* Get an unknown value. */
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
|
||||
/* Make it small and 4-byte aligned. */
|
||||
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
|
||||
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
|
||||
/* Add it to fp. We now have either fp-12 or fp-16, we don't know
|
||||
* which, but either way it points to initialized stack.
|
||||
*/
|
||||
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
|
||||
/* Dereference it indirectly. */
|
||||
BPF_LD_MAP_FD(BPF_REG_1, 0),
|
||||
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.fixup_map_hash_8b = { 6 },
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_LWT_IN,
|
||||
},
|
@ -1,97 +0,0 @@
{
	"xadd/w check unaligned stack",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "misaligned stack access off",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
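In inline assembly a BPF_ATOMIC_OP(BPF_W, BPF_ADD, ...) instruction is written with clang's lock syntax; the sketch below assumes that syntax and an illustrative function name:

SEC("tc")
__description("xadd/w check unaligned stack")
__failure __msg("misaligned stack access off")
__naked void xadd_w_check_unaligned_stack(void)
{
	asm volatile ("					\
	r0 = 1;						\
	*(u64*)(r10 - 8) = r0;				\
	lock *(u32 *)(r10 - 7) += w0;			\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}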
{
	"xadd/w check unaligned map",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = REJECT,
	.errstr = "misaligned value access off",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"xadd/w check unaligned pkt",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_0, 99),
	BPF_JMP_IMM(BPF_JA, 0, 0, 6),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "BPF_ATOMIC stores into R2 pkt is not allowed",
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"xadd/w check whether src/dst got mangled, 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
	BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = 3,
},
{
	"xadd/w check whether src/dst got mangled, 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
	BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = 3,
},
@ -1,14 +0,0 @@
{
	"XDP, using ifindex from netdev",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct xdp_md, ingress_ifindex)),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
	.retval = 1,
},
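A sketch of this last test in migrated form; __retval(1) assumes the runner provides a non-zero ingress ifindex, as the expected return value above implies, and the function name is illustrative:

SEC("xdp")
__description("XDP, using ifindex from netdev")
__success __retval(1)
__naked void xdp_using_ifindex_from_netdev(void)
{
	asm volatile ("					\
	r0 = 0;						\
	r2 = *(u32*)(r1 + %[xdp_md_ingress_ifindex]);	\
	if r2 < 1 goto l0_%=;				\
	r0 = 1;						\
l0_%=:	exit;						\
"	:
	: __imm_const(xdp_md_ingress_ifindex, offsetof(struct xdp_md, ingress_ifindex))
	: __clobber_all);
}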