/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)

static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
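
/*
 * Illustrative example (added, not part of the original source): the table
 * above is indexed by op0, bits [28:25] of the instruction. For a NOP
 * (0xd503201f), (insn >> 25) & 0xf == 0xa, so the class resolves to
 * AARCH64_INSN_CLS_BR_SYS.
 */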
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}
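
/*
 * Note (added): the 0xFE0 mask above selects the HINT immediate field,
 * CRm:op2 in bits [11:5]. Hints with a wait/event side effect are rejected;
 * everything else is treated as a NOP for patching purposes.
 */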
bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);
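
/*
 * Note (added): when kernel or module text is mapped read-only
 * (CONFIG_DEBUG_RODATA / CONFIG_DEBUG_SET_MODULE_RONX), patch_map() below
 * creates a temporary writable alias of the target page through the
 * FIX_TEXT_POKE0 fixmap slot, and patch_unmap() tears that alias down again.
 * With neither option enabled, the instruction is written through its
 * normal mapping.
 */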
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = pfn_to_page(PHYS_PFN(__pa(addr)));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
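
/*
 * Illustrative usage (added, not part of the original source): this is
 * roughly how a caller such as a jump-label or ftrace backend would turn a
 * NOP at a hypothetical address pc into a direct branch to target:
 *
 *	u32 insn = aarch64_insn_gen_branch_imm(pc, target,
 *					       AARCH64_INSN_BRANCH_NOLINK);
 *	aarch64_insn_patch_text_nosync((void *)pc, insn);
 *
 * The "nosync" variant only flushes the I-cache; it does not force other
 * CPUs to resynchronise their instruction streams.
 */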
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
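
/*
 * Illustrative usage (added, not part of the original source): patching a
 * single hotpatch-safe instruction takes the cheap IPI path above; anything
 * else falls back to stop_machine() via aarch64_insn_patch_text_sync():
 *
 *	void *addrs[] = { (void *)pc };
 *	u32 insns[] = { aarch64_insn_gen_nop() };
 *	int ret = aarch64_insn_patch_text(addrs, insns, 1);
 */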
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
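
/*
 * Worked example (added, not part of the original source): packing the
 * 26-bit branch immediate of a B instruction. The raw opcode for B is
 * 0x14000000 and the immediate is a word offset, so for a branch eight
 * bytes forward (offset >> 2 == 2):
 *
 *	aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, 0x14000000, 2)
 *
 * clears bits [25:0] and ORs in 0x2, yielding 0x14000002, i.e. a "b"
 * eight bytes ahead. For AARCH64_INSN_IMM_ADR the immediate is first split
 * into immlo/immhi as the ADR/ADRP encoding requires.
 */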
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
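
/*
 * Worked example (added, not part of the original source):
 *
 *	aarch64_insn_gen_branch_imm(0x1000, 0x2000, AARCH64_INSN_BRANCH_NOLINK)
 *
 * computes offset = 0x1000, packs offset >> 2 = 0x400 into imm26 and
 * returns 0x14000400, a "b" to pc + 0x1000.
 */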
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
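
/*
 * Illustrative usage (added, not part of the original source): the familiar
 * frame-pointer prologue "stp x29, x30, [sp, #-16]!" can be generated as
 * (register enum names assumed per asm/insn.h):
 *
 *	aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_29,
 *					 AARCH64_INSN_REG_30,
 *					 AARCH64_INSN_REG_SP, -16,
 *					 AARCH64_INSN_VARIANT_64BIT,
 *					 AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 *
 * The byte offset is scaled by the access size, so -16 becomes an imm7
 * value of -2 here.
 */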
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_4K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
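
/*
 * Illustrative usage (added, not part of the original source): a constant
 * such as 0x12345678 can be materialised in x0 as a MOVZ/MOVK sequence,
 * 16 bits at a time:
 *
 *	insns[0] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 0x5678, 0,
 *					     AARCH64_INSN_VARIANT_64BIT,
 *					     AARCH64_INSN_MOVEWIDE_ZERO);
 *	insns[1] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 0x1234, 16,
 *					     AARCH64_INSN_VARIANT_64BIT,
 *					     AARCH64_INSN_MOVEWIDE_KEEP);
 *
 * i.e. "movz x0, #0x5678" followed by "movk x0, #0x1234, lsl #16".
 */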
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
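
/*
 * Worked example (added, not part of the original source): each shift pair
 * above sign-extends the field and scales it to bytes in one go. For a B
 * instruction, "imm << 6" moves imm26 up to bits [31:6]; the arithmetic
 * ">> 4" then brings it back down while multiplying by 4. A backwards
 * branch of one word has imm26 = 0x3ffffff, and (0x3ffffff << 6) >> 4
 * evaluates to -4 bytes.
 */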
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}
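
/*
 * Note (added): per the Thumb-2 encoding rules, a first halfword whose top
 * five bits are 0b11101, 0b11110 or 0b11111 (any value >= 0xe800)
 * introduces a 32-bit ("wide") instruction; everything below that is a
 * 16-bit encoding.
 */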
/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}