/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
# ifndef _BPF_JIT64_H
# define _BPF_JIT64_H
# include "bpf_jit.h"
/*
 * Stack layout:
 * Ensure the top half (upto local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 6*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 8		|
 * fp (r31) -->	[   ebpf stack space	] upto 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */
/* for gpr non volatile registers BPF_REG_6 to 10 */
#define BPF_PPC_STACK_SAVE	(6*8)

/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	16

/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
#ifndef __ASSEMBLY__

/* BPF register usage */
/* JIT-internal scratch registers, numbered just past the real BPF regs */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
/* BPF to ppc register mappings */
static const int b2p [ ] = {
/* function return value */
[ BPF_REG_0 ] = 8 ,
/* function arguments */
[ BPF_REG_1 ] = 3 ,
[ BPF_REG_2 ] = 4 ,
[ BPF_REG_3 ] = 5 ,
[ BPF_REG_4 ] = 6 ,
[ BPF_REG_5 ] = 7 ,
/* non volatile registers */
[ BPF_REG_6 ] = 27 ,
[ BPF_REG_7 ] = 28 ,
[ BPF_REG_8 ] = 29 ,
[ BPF_REG_9 ] = 30 ,
/* frame pointer aka BPF_REG_10 */
[ BPF_REG_FP ] = 31 ,
/* eBPF jit internal registers */
powerpc/bpf: Add support for bpf constant blinding
In line with similar support for other architectures by Daniel Borkmann.
'MOD Default X' from test_bpf without constant blinding:
84 bytes emitted from JIT compiler (pass:3, flen:7)
d0000000058a4688 + <x>:
0: nop
4: nop
8: std r27,-40(r1)
c: std r28,-32(r1)
10: xor r8,r8,r8
14: xor r28,r28,r28
18: mr r27,r3
1c: li r8,66
20: cmpwi r28,0
24: bne 0x0000000000000030
28: li r8,0
2c: b 0x0000000000000044
30: divwu r9,r8,r28
34: mullw r9,r28,r9
38: subf r8,r9,r8
3c: rotlwi r8,r8,0
40: li r8,66
44: ld r27,-40(r1)
48: ld r28,-32(r1)
4c: mr r3,r8
50: blr
... and with constant blinding:
140 bytes emitted from JIT compiler (pass:3, flen:11)
d00000000bd6ab24 + <x>:
0: nop
4: nop
8: std r27,-40(r1)
c: std r28,-32(r1)
10: xor r8,r8,r8
14: xor r28,r28,r28
18: mr r27,r3
1c: lis r2,-22834
20: ori r2,r2,36083
24: rotlwi r2,r2,0
28: xori r2,r2,36017
2c: xoris r2,r2,42702
30: rotlwi r2,r2,0
34: mr r8,r2
38: rotlwi r8,r8,0
3c: cmpwi r28,0
40: bne 0x000000000000004c
44: li r8,0
48: b 0x000000000000007c
4c: divwu r9,r8,r28
50: mullw r9,r28,r9
54: subf r8,r9,r8
58: rotlwi r8,r8,0
5c: lis r2,-17137
60: ori r2,r2,39065
64: rotlwi r2,r2,0
68: xori r2,r2,39131
6c: xoris r2,r2,48399
70: rotlwi r2,r2,0
74: mr r8,r2
78: rotlwi r8,r8,0
7c: ld r27,-40(r1)
80: ld r28,-32(r1)
84: mr r3,r8
88: blr
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-09-24 02:05:02 +05:30
[ BPF_REG_AX ] = 2 ,
2016-06-22 21:55:07 +05:30
[ TMP_REG_1 ] = 9 ,
[ TMP_REG_2 ] = 10
} ;
/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN		27
#define SEEN_FUNC	0x1000 /* might call external helpers */
#define SEEN_STACK	0x2000 /* uses BPF stack */
#define SEEN_TAILCALL	0x4000 /* uses tail calls */
/* Per-program state carried across the JIT compilation passes. */
struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r10 and r27-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 16 to 23
	 *   encoded in SEEN_* macros above
	 */
	unsigned int seen;
	unsigned int idx;	/* presumably index of next image word to emit -- TODO confirm against bpf_jit_comp64.c */
	unsigned int stack_size;	/* eBPF stack space for this program (see layout comment: upto 512) */
};
# endif /* !__ASSEMBLY__ */
# endif