/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * bpf_jit.h: BPF JIT compiler for PPC
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *	     2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#ifndef __ASSEMBLY__

#include <asm/types.h>
#include <asm/ppc-opcode.h>
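
/*
 * Under the ELFv1 ABI, a function symbol refers to a 24-byte function
 * descriptor (entry address, TOC pointer, environment pointer) rather
 * than to the code itself, so the JIT reserves room for one.
 */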
#ifdef PPC64_ELF_ABI_v1
#define FUNCTION_DESCR_SIZE	24
#else
#define FUNCTION_DESCR_SIZE	0
#endif

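/*
 * 'image' may be NULL during the initial sizing pass; PLANT_INSTR still
 * advances idx either way, so instruction offsets come out identical
 * whether or not code is actually being written.
 */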
#define PLANT_INSTR(d, idx, instr)					      \
	do { if (d) { (d)[idx] = instr; } idx++; } while (0)
#define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)
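
/*
 * Branch targets below are byte offsets from the start of the JIT image;
 * since PowerPC branches are PC-relative, the current instruction offset
 * (ctx->idx * 4) is subtracted before the displacement is masked in.
 */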

/* Long jump; (unconditional 'branch') */
#define PPC_JMP(dest)		EMIT(PPC_INST_BRANCH |			      \
				     (((dest) - (ctx->idx * 4)) &	      \
				      0x03fffffc))
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest)	EMIT(PPC_INST_BRANCH_COND |	      \
					     (((cond) & 0x3ff) << 16) |	      \
					     (((dest) - (ctx->idx * 4)) &     \
					      0xfffc))

/* Sign-extended 32-bit immediate load */
#define PPC_LI32(d, i)		do {					      \
	if ((int)(uintptr_t)(i) >= -32768 &&				      \
			(int)(uintptr_t)(i) < 32768)			      \
		EMIT(PPC_RAW_LI(d, i));					      \
	else {								      \
		EMIT(PPC_RAW_LIS(d, IMM_H(i)));				      \
		if (IMM_L(i))						      \
			EMIT(PPC_RAW_ORI(d, d, IMM_L(i)));		      \
	} } while (0)
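/*
 * Illustrative expansion: PPC_LI32(d, 100) fits in 16 bits and becomes a
 * single 'li d, 100', while PPC_LI32(d, 0x12345678) becomes
 * 'lis d, 0x1234; ori d, d, 0x5678'.
 */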

#define PPC_LI64(d, i)		do {					      \
	if ((long)(i) >= -2147483648 &&					      \
			(long)(i) < 2147483648)				      \
		PPC_LI32(d, i);						      \
	else {								      \
		if (!((uintptr_t)(i) & 0xffff800000000000ULL))		      \
			EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) &	      \
					0xffff));			      \
		else {							      \
			EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48)));	      \
			if ((uintptr_t)(i) & 0x0000ffff00000000ULL)	      \
				EMIT(PPC_RAW_ORI(d, d,			      \
					((uintptr_t)(i) >> 32) & 0xffff));    \
		}							      \
		EMIT(PPC_RAW_SLDI(d, d, 32));				      \
		if ((uintptr_t)(i) & 0x00000000ffff0000ULL)		      \
			EMIT(PPC_RAW_ORIS(d, d,				      \
					((uintptr_t)(i) >> 16) & 0xffff));    \
		if ((uintptr_t)(i) & 0x000000000000ffffULL)		      \
			EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) &		      \
					0xffff));			      \
	} } while (0)
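/*
 * Illustrative worst-case expansion: PPC_LI64(d, 0x1234567890abcdefULL)
 * becomes 'lis d, 0x1234; ori d, d, 0x5678; sldi d, d, 32;
 * oris d, d, 0x90ab; ori d, d, 0xcdef' -- five instructions building the
 * value 16 bits at a time.
 */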

#ifdef CONFIG_PPC64
#define PPC_FUNC_ADDR(d, i)	do { PPC_LI64(d, i); } while (0)
#else
#define PPC_FUNC_ADDR(d, i)	do { PPC_LI32(d, i); } while (0)
#endif
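
/*
 * Conditional branches carry a signed, word-aligned 16-bit displacement
 * (the low two bits are always zero), so their reach is limited to
 * +/-32KB from the branch instruction itself.
 */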
static inline bool is_nearbranch(int offset)
{
	return (offset < 32768) && (offset >= -32768);
}

/*
 * The fly in the ointment of code size changing from pass to pass is
 * avoided by padding the short branch case with a NOP. If code size
 * differed with branch reach, code would move between passes and we
 * would need several passes to converge on a stable state.
 */
#define PPC_BCC(cond, dest)	do {					      \
		if (is_nearbranch((dest) - (ctx->idx * 4))) {		      \
			PPC_BCC_SHORT(cond, dest);			      \
			EMIT(PPC_RAW_NOP());				      \
		} else {						      \
			/* Flip the 'T or F' bit to invert comparison */      \
			PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4);  \
			PPC_JMP(dest);					      \
		} } while (0)
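
/*
 * Illustrative far-branch rewrite: a COND_EQ branch whose target lies
 * outside the +/-32KB conditional reach is emitted as the inverted short
 * branch over an unconditional one, i.e. 'bne +8; b dest'.
 */
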
/* To create a branch condition, select a bit of cr0... */
#define CR0_LT		0
#define CR0_GT		1
#define CR0_EQ		2
/* ...and modify BO[3] */
#define COND_CMP_TRUE	0x100
#define COND_CMP_FALSE	0x000
/* Together, they make all required comparisons: */
#define COND_GT		(CR0_GT | COND_CMP_TRUE)
#define COND_GE		(CR0_LT | COND_CMP_FALSE)
#define COND_EQ		(CR0_EQ | COND_CMP_TRUE)
#define COND_NE		(CR0_EQ | COND_CMP_FALSE)
#define COND_LT		(CR0_LT | COND_CMP_TRUE)
#define COND_LE		(CR0_GT | COND_CMP_FALSE)
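
/*
 * For instance (illustrative), PPC_BCC_SHORT(COND_EQ, dest) assembles a
 * 'beq' to dest: CR0_EQ selects cr0's EQ bit as the BI field, while
 * COND_CMP_TRUE flips the base branch-if-false encoding to branch-if-true.
 */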

#define SEEN_FUNC	0x1000 /* might call external helpers */
#define SEEN_STACK	0x2000 /* uses BPF stack */
#define SEEN_TAILCALL	0x4000 /* uses tail calls */

struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r10 and r27-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 16 to 23
	 *   encoded in the SEEN_* macros above
	 * (bit numbers follow the PowerPC convention,
	 *  with bit 0 being the most significant)
	 */
	unsigned int seen;
	unsigned int idx;
	unsigned int stack_size;
};
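
/*
 * The JIT image is written through the data cache; before the generated
 * code can be executed, the corresponding instruction-cache lines must be
 * made coherent, which flush_icache_range() takes care of.
 */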
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();	/* order stores to the image before flushing */
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return ctx->seen & (1 << (31 - i));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= 1 << (31 - i);
}
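
/*
 * Example (illustrative): bpf_set_seen_register(ctx, 27) marks r27 as used
 * by setting bit 0x10 (1 << (31 - 27)); prologue/epilogue generation can
 * then query bpf_is_seen_register() to decide whether that non-volatile
 * register needs to be saved and restored.
 */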

#endif

#endif