linux/tools/testing/selftests/bpf/verifier/atomic_and.c
b29dd96b90 ("bpf, x86: Fix BPF_FETCH atomic and/or/xor with r0 as src"), Brendan Jackman
The x86 JIT generates a CMPXCHG loop to implement the fetching bitwise
atomics (BPF_AND/BPF_OR/BPF_XOR with BPF_FETCH). Because CMPXCHG is
hard-coded to use rax (which holds the BPF r0 value), the JIT saves the
_real_ r0 value into the internal "ax" temporary register and restores
it once the loop is complete.

In the middle of the loop, the actual bitwise operation is performed
using src_reg. The bug occurs when src_reg is r0: as described above,
r0 has been clobbered and the real r0 value is in the ax register.

Therefore, when src_reg is r0, perform the operation on the ax register
instead.

Fixes: 981f94c3e9 ("bpf: Add bitwise atomic instructions")
Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: KP Singh <kpsingh@kernel.org>
Link: https://lore.kernel.org/bpf/20210216125307.1406237-1-jackmanb@google.com
2021-02-22 18:03:11 +01:00
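
Before the tests themselves, a minimal user-space sketch of the semantics they
encode may help. It assumes a GCC/Clang toolchain with the __atomic builtins,
is illustrative only, and is not part of the selftest:

/* Illustrative only: a user-space model of what the selftests below
 * expect from BPF_AND with and without BPF_FETCH. It makes no claim
 * about the JIT internals beyond what the commit message above states.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t val = 0x110;

        /* BPF_AND | BPF_FETCH behaves like atomic_fetch_and(): the old
         * value (0x110) comes back in the source register and memory is
         * left holding 0x110 & 0x011 == 0x010. The fix above ensures
         * this still holds when the source register is r0, whose live
         * value the x86 CMPXCHG loop has parked in the internal ax
         * register.
         */
        uint64_t old = __atomic_fetch_and(&val, 0x011, __ATOMIC_SEQ_CST);
        assert(old == 0x110);
        assert(val == 0x010);

        /* Plain BPF_AND (no BPF_FETCH) only updates memory; the source
         * register (r1 in the first test below) must stay untouched.
         */
        val = 0x110;
        (void)__atomic_fetch_and(&val, 0x011, __ATOMIC_SEQ_CST);
        assert(val == 0x010);

        printf("old=0x%llx val=0x%llx\n",
               (unsigned long long)old, (unsigned long long)val);
        return 0;
}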


{
        "BPF_ATOMIC_AND without fetch",
        .insns = {
                /* val = 0x110; */
                BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
                /* atomic_and(&val, 0x011); */
                BPF_MOV64_IMM(BPF_REG_1, 0x011),
                BPF_ATOMIC_OP(BPF_DW, BPF_AND, BPF_REG_10, BPF_REG_1, -8),
                /* if (val != 0x010) exit(2); */
                BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x010, 2),
                BPF_MOV64_IMM(BPF_REG_0, 2),
                BPF_EXIT_INSN(),
                /* r1 should not be clobbered, no BPF_FETCH flag */
                BPF_MOV64_IMM(BPF_REG_0, 0),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
},
{
        "BPF_ATOMIC_AND with fetch",
        .insns = {
                BPF_MOV64_IMM(BPF_REG_0, 123),
                /* val = 0x110; */
                BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
                /* old = atomic_fetch_and(&val, 0x011); */
                BPF_MOV64_IMM(BPF_REG_1, 0x011),
                BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
                /* if (old != 0x110) exit(3); */
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
                BPF_MOV64_IMM(BPF_REG_0, 3),
                BPF_EXIT_INSN(),
                /* if (val != 0x010) exit(2); */
                BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
                BPF_MOV64_IMM(BPF_REG_0, 2),
                BPF_EXIT_INSN(),
                /* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_EXIT_INSN(),
                /* exit(0); */
                BPF_MOV64_IMM(BPF_REG_0, 0),
                BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
},
{
        "BPF_ATOMIC_AND with fetch 32bit",
        .insns = {
                /* r0 = (s64) -1 */
                BPF_MOV64_IMM(BPF_REG_0, 0),
                BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
                /* val = 0x110; */
                BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
                /* old = atomic_fetch_and(&val, 0x011); */
                BPF_MOV32_IMM(BPF_REG_1, 0x011),
                BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
                /* if (old != 0x110) exit(3); */
                BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
                BPF_MOV32_IMM(BPF_REG_0, 3),
                BPF_EXIT_INSN(),
                /* if (val != 0x010) exit(2); */
                BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
                BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
                BPF_MOV32_IMM(BPF_REG_0, 2),
                BPF_EXIT_INSN(),
                /* Check R0 wasn't clobbered (for fear of x86 JIT bug)
                 * It should be -1 so add 1 to get exit code.
                 */
                BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
                BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
},
{
        "BPF_ATOMIC_AND with fetch - r0 as source reg",
        .insns = {
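                /* Regression test for the x86 JIT bug fixed above: the
                 * CMPXCHG loop saves r0 into the internal ax register, so
                 * when r0 is also the source operand the AND must read the
                 * saved value from ax rather than rax, which the loop reuses.
                 */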
                /* val = 0x110; */
                BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
                /* old = atomic_fetch_and(&val, 0x011); */
                BPF_MOV64_IMM(BPF_REG_0, 0x011),
                BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_0, -8),
                /* if (old != 0x110) exit(3); */
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x110, 2),
                BPF_MOV64_IMM(BPF_REG_0, 3),
                BPF_EXIT_INSN(),
                /* if (val != 0x010) exit(2); */
                BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
                BPF_MOV64_IMM(BPF_REG_0, 2),
                BPF_EXIT_INSN(),
                /* exit(0); */
                BPF_MOV64_IMM(BPF_REG_0, 0),
                BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
},
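
For context (not shown above): fragments under tools/testing/selftests/bpf/verifier/
are not compiled on their own. test_verifier.c pulls every file in that directory
into its struct bpf_test array through a generated header, roughly as sketched
below (the names follow my reading of the selftests Makefile, so treat the details
as an approximation); each { ... } entry above then runs as one test case of the
test_verifier binary.

/* Approximate sketch of how test_verifier.c consumes the verifier/*.c
 * fragments, including atomic_and.c; the generated verifier/tests.h is
 * a list of #include directives guarded by FILL_ARRAY.
 */
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};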