selftests/bpf: verifier/ringbuf.c converted to inline assembly
Test verifier/ringbuf.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman <eddyz87@gmail.com> Link: https://lore.kernel.org/r/20230325025524.144043-34-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Committed by: Alexei Starovoitov · parent commit: 18cdc2b531 · commit: b7e4203086
@ -30,6 +30,7 @@
|
|||||||
#include "verifier_meta_access.skel.h"
|
#include "verifier_meta_access.skel.h"
|
||||||
#include "verifier_raw_stack.skel.h"
|
#include "verifier_raw_stack.skel.h"
|
||||||
#include "verifier_raw_tp_writable.skel.h"
|
#include "verifier_raw_tp_writable.skel.h"
|
||||||
|
#include "verifier_ringbuf.skel.h"
|
||||||
|
|
||||||
__maybe_unused
|
__maybe_unused
|
||||||
static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory)
|
static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory)
|
||||||
@ -82,3 +83,4 @@ void test_verifier_masking(void) { RUN(verifier_masking); }
|
|||||||
void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
|
void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
|
||||||
void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
|
void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
|
||||||
void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
|
void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
|
||||||
|
void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); }
|
||||||
|
131 lines — tools/testing/selftests/bpf/progs/verifier_ringbuf.c (new file)
|
|||||||
|
// SPDX-License-Identifier: GPL-2.0
|
||||||
|
/* Converted from tools/testing/selftests/bpf/verifier/ringbuf.c */
|
||||||
|
|
||||||
|
#include <linux/bpf.h>
|
||||||
|
#include <bpf/bpf_helpers.h>
|
||||||
|
#include "bpf_misc.h"
|
||||||
|
|
||||||
|
struct {
|
||||||
|
__uint(type, BPF_MAP_TYPE_RINGBUF);
|
||||||
|
__uint(max_entries, 4096);
|
||||||
|
} map_ringbuf SEC(".maps");
|
||||||
|
|
||||||
|
/* Reserve ringbuf memory, then pass a pointer with a non-zero offset
 * (+0xcafe) to bpf_ringbuf_submit(). The verifier must reject this:
 * release functions require R1 to point at the start of the reservation.
 */
SEC("socket")
__description("ringbuf: invalid reservation offset 1")
__failure __msg("R1 must have zero offset when passed to release func")
__failure_unpriv
__naked void ringbuf_invalid_reservation_offset_1(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* spill R6(mem) into the stack */		\
	*(u64*)(r10 - 8) = r6;				\
	/* fill it back in R7 */			\
	r7 = *(u64*)(r10 - 8);				\
	/* should be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u64*)(r7 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r7;					\
	/* add invalid offset to reserved ringbuf memory */\
	r1 += 0xcafe;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
|
||||||
|
|
||||||
|
/* Like offset test 1, but the invalid offset (+0xcafe) is added to R7
 * BEFORE the memory access, so the verifier must reject the store
 * through R7 as being outside the 8-byte reservation.
 */
SEC("socket")
__description("ringbuf: invalid reservation offset 2")
__failure __msg("R7 min value is outside of the allowed memory range")
__failure_unpriv
__naked void ringbuf_invalid_reservation_offset_2(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* spill R6(mem) into the stack */		\
	*(u64*)(r10 - 8) = r6;				\
	/* fill it back in R7 */			\
	r7 = *(u64*)(r10 - 8);				\
	/* add invalid offset to reserved ringbuf memory */\
	r7 += 0xcafe;					\
	/* should be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u64*)(r7 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r7;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
|
||||||
|
|
||||||
|
/* Positive test: memory reserved from the ring buffer may be passed to
 * other helpers (here bpf_fib_lookup as the params buffer) before being
 * submitted. The program must load successfully and return 0.
 */
SEC("xdp")
__description("ringbuf: check passing rb mem to helpers")
__success __retval(0)
__naked void passing_rb_mem_to_helpers(void)
{
	asm volatile ("					\
	r6 = r1;					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	r7 = r0;					\
	/* check whether the reservation was successful */\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	/* pass allocated ring buffer memory to fib lookup */\
	r1 = r6;					\
	r2 = r0;					\
	r3 = 8;						\
	r4 = 0;						\
	call %[bpf_fib_lookup];				\
	/* submit the ringbuf memory */			\
	r1 = r7;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_fib_lookup),
	  __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
|
||||||
|
|
||||||
|
/* License declaration required for BPF programs that use GPL-only helpers. */
char _license[] SEC("license") = "GPL";
|
@ -1,95 +0,0 @@
|
|||||||
/* Removed legacy table entry (superseded by the inline-asm version):
 * submit with a non-zero offset in R1 must be rejected.
 */
{
	"ringbuf: invalid reservation offset 1",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	/* spill R6(mem) into the stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	/* fill it back in R7 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
	/* should be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* add invalid offset to reserved ringbuf memory */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xcafe),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = REJECT,
	.errstr = "R1 must have zero offset when passed to release func",
},
|
|
||||||
/* Removed legacy table entry (superseded by the inline-asm version):
 * access through R7 after adding an invalid offset must be rejected.
 */
{
	"ringbuf: invalid reservation offset 2",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	/* spill R6(mem) into the stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	/* fill it back in R7 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
	/* add invalid offset to reserved ringbuf memory */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 0xcafe),
	/* should be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = REJECT,
	.errstr = "R7 min value is outside of the allowed memory range",
},
|
|
||||||
/* Removed legacy table entry (superseded by the inline-asm version):
 * reserved ringbuf memory may be passed to bpf_fib_lookup; ACCEPT.
 */
{
	"ringbuf: check passing rb mem to helpers",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	/* pass allocated ring buffer memory to fib lookup */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_IMM(BPF_REG_3, 8),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_fib_lookup),
	/* submit the ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 2 },
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
|
|
Reference in New Issue
Block a user