/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#ifndef _BPF_JIT64_H
#define _BPF_JIT64_H

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 8*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 8		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers BPF_REG_6 to 10, plus skb cache registers */
#define BPF_PPC_STACK_SAVE	(8*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	16
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
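
/*
 * Illustrative sketch (not part of the original header): the total
 * frame for a program that needs 'bpf_stack' bytes of eBPF stack is
 * expected to be BPF_PPC_STACKFRAME plus that stack rounded up to a
 * quadword boundary. The macro name below is made up for this
 * example; the JIT computes the actual size itself.
 */
#define BPF_PPC_EXAMPLE_FRAME_SIZE(bpf_stack)	\
	(BPF_PPC_STACKFRAME + (((bpf_stack) + 15) & ~15))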

#ifndef __ASSEMBLY__

/* BPF register usage */
#define SKB_HLEN_REG	(MAX_BPF_JIT_REG + 0)
#define SKB_DATA_REG	(MAX_BPF_JIT_REG + 1)
#define TMP_REG_1	(MAX_BPF_JIT_REG + 2)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 3)

/* BPF to ppc register mappings */
static const int b2p[] = {
	/* function return value */
	[BPF_REG_0] = 8,
	/* function arguments */
	[BPF_REG_1] = 3,
	[BPF_REG_2] = 4,
	[BPF_REG_3] = 5,
	[BPF_REG_4] = 6,
	[BPF_REG_5] = 7,
	/* non volatile registers */
	[BPF_REG_6] = 27,
	[BPF_REG_7] = 28,
	[BPF_REG_8] = 29,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 31,
	/* eBPF jit internal registers */
	[BPF_REG_AX] = 2,
	[SKB_HLEN_REG] = 25,
	[SKB_DATA_REG] = 26,
	[TMP_REG_1] = 9,
	[TMP_REG_2] = 10
};

/* PPC NVR range -- update this if we ever use NVRs below r24 */
#define BPF_PPC_NVR_MIN		24

/* Assembly helpers */
#define DECLARE_LOAD_FUNC(func)	u64 func(u64 r3, u64 r4);			\
				u64 func##_negative_offset(u64 r3, u64 r4);	\
				u64 func##_positive_offset(u64 r3, u64 r4);

DECLARE_LOAD_FUNC(sk_load_word);
DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);

#define CHOOSE_LOAD_FUNC(imm, func)						\
			(imm < 0 ?						\
			(imm >= SKF_LL_OFF ? func##_negative_offset : func) :	\
			func##_positive_offset)
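
/*
 * Illustrative sketch (not part of the original header): how a caller
 * could resolve the word-sized load helper for a given BPF immediate.
 * The function name is made up for this example; the JIT uses
 * CHOOSE_LOAD_FUNC directly when emitting the call.
 */
static inline u64 (*example_pick_sk_load_word(int imm))(u64, u64)
{
	/*
	 * imm >= 0:              sk_load_word_positive_offset
	 * SKF_LL_OFF <= imm < 0: sk_load_word_negative_offset
	 * imm < SKF_LL_OFF:      sk_load_word (generic helper)
	 */
	return CHOOSE_LOAD_FUNC(imm, sk_load_word);
}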

#define SEEN_FUNC	0x1000 /* might call external helpers */
#define SEEN_STACK	0x2000 /* uses BPF stack */
#define SEEN_SKB	0x4000 /* uses sk_buff */
#define SEEN_TAILCALL	0x8000 /* uses tail calls */

struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r10 and r25-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 16 to 23
	 *   encoded in SEEN_* macros above
	 */
	unsigned int seen;
	unsigned int idx;
	unsigned int stack_size;
};
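
/*
 * Illustrative sketch (not part of the original header): register
 * usage could be recorded in 'seen' with one bit per ppc register
 * (1 << (31 - r) for r3-r10 and r25-r31), alongside the SEEN_* flags
 * above. The helper names are made up for this example; the JIT
 * proper keeps its own equivalents.
 */
static inline void example_mark_reg_seen(struct codegen_context *ctx, int ppc_reg)
{
	ctx->seen |= 1 << (31 - ppc_reg);
}

static inline int example_reg_was_seen(struct codegen_context *ctx, int ppc_reg)
{
	return ctx->seen & (1 << (31 - ppc_reg));
}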

#endif /* !__ASSEMBLY__ */

#endif