/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/insn.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);

	return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
}
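/*
 * Illustrative sketch, not part of the original file: round-tripping one
 * instruction through the accessors above. The instruction is stored as a
 * little-endian 32-bit word regardless of CPU endianness. 'patch_site' is a
 * hypothetical kernel text address.
 */
static int __maybe_unused example_insn_roundtrip(void *patch_site)
{
	u32 insn;
	int ret;

	ret = aarch64_insn_read(patch_site, &insn);	/* CPU-endian value */
	if (ret)
		return ret;

	/* write it back unchanged; callers normally modify 'insn' first */
	return aarch64_insn_write(patch_site, insn);
}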
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		atomic_set(&pp->cpu_count, -1);
	} else {
		while (atomic_read(&pp->cpu_count) != -1)
			cpu_relax();
		isb();
	}

	return ret;
}
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
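/*
 * Illustrative sketch, not part of the original file: overwriting one
 * instruction with a NOP through the hotpatch path above. The fast
 * (nosync + IPI) path is taken only when both the old and the new
 * instruction are in the B/BL/NOP/BKPT/SVC/HVC/SMC set checked by
 * aarch64_insn_hotpatch_safe(); otherwise stop_machine() is used.
 * 'patch_site' is a hypothetical kernel text address.
 */
static int __maybe_unused example_patch_nop(void *patch_site)
{
	u32 nop = aarch64_insn_gen_nop();

	return aarch64_insn_patch_text(&patch_site, &nop, 1);
}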
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, lomask, himask, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		lomask = 0x3;
		himask = 0x7ffff;
		immlo = imm & lomask;
		imm >>= 2;
		immhi = imm & himask;
		imm = (immlo << 24) | (immhi);
		mask = (lomask << 24) | (himask);
		shift = 5;
		break;
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
		       type);
		return 0;
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
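/*
 * Illustrative sketch, not part of the original file: rewriting the 16-bit
 * immediate field of an existing MOVZ/MOVK/MOVN encoding, e.g. while
 * applying a MOVW-class relocation. 'newimm' is a hypothetical replacement
 * value; all other bits of 'insn' are preserved.
 */
static u32 __maybe_unused example_update_movw_imm(u32 insn, u16 newimm)
{
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, newimm);
}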
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return 0;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		BUG_ON(1);
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
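/*
 * Illustrative sketch, not part of the original file: emitting a call to a
 * function, as an ftrace- or jump-label-style patcher might. 'pc' is the
 * address the instruction will live at and 'func' is a hypothetical target
 * assumed to be within +/-128M of it.
 */
static u32 __maybe_unused example_gen_call(unsigned long pc, unsigned long func)
{
	/* bl <func> */
	return aarch64_insn_gen_branch_imm(pc, func, AARCH64_INSN_BRANCH_LINK);
}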
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		BUG_ON(1);
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
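/*
 * Illustrative sketch, not part of the original file: a 64-bit load with a
 * register offset, roughly "ldr x0, [x1, x2]". The AARCH64_INSN_REG_*
 * enumerators are assumed to come from asm/insn.h.
 */
static u32 __maybe_unused example_gen_ldr_reg(void)
{
	return aarch64_insn_gen_load_store_reg(AARCH64_INSN_REG_0,
					       AARCH64_INSN_REG_1,
					       AARCH64_INSN_REG_2,
					       AARCH64_INSN_SIZE_64,
					       AARCH64_INSN_LDST_LOAD_REG_OFFSET);
}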
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
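/*
 * Illustrative sketch, not part of the original file: the usual prologue
 * frame push, "stp x29, x30, [sp, #-16]!". Assumes AARCH64_INSN_REG_FP,
 * AARCH64_INSN_REG_LR and AARCH64_INSN_REG_SP are the frame pointer, link
 * register and stack pointer enumerators from asm/insn.h.
 */
static u32 __maybe_unused example_gen_push_frame(void)
{
	return aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
						AARCH64_INSN_REG_LR,
						AARCH64_INSN_REG_SP, -16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
}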
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
	}

	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
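/*
 * Illustrative sketch, not part of the original file: a 64-bit logical
 * shift right by a constant, "lsr xd, xn, #shift", which the architecture
 * defines as the alias UBFM xd, xn, #shift, #63.
 */
static u32 __maybe_unused example_gen_lsr64(enum aarch64_insn_register dst,
					    enum aarch64_insn_register src,
					    int shift)
{
	return aarch64_insn_gen_bitfield(dst, src, shift, 63,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_BITFIELD_MOVE_UNSIGNED);
}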
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
	}

	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
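/*
 * Illustrative sketch, not part of the original file: materialising an
 * arbitrary 64-bit constant with a MOVZ/MOVK sequence, one 16-bit chunk per
 * instruction. 'buf' is a hypothetical buffer with room for four encodings.
 */
static void __maybe_unused example_load_imm64(u32 *buf, u64 val,
					      enum aarch64_insn_register dst)
{
	int i;

	/* movz dst, #(val & 0xffff) */
	buf[0] = aarch64_insn_gen_movewide(dst, val & 0xffff, 0,
					   AARCH64_INSN_VARIANT_64BIT,
					   AARCH64_INSN_MOVEWIDE_ZERO);

	/* movk dst, #chunk, lsl #16 / #32 / #48 */
	for (i = 1; i < 4; i++)
		buf[i] = aarch64_insn_gen_movewide(dst,
						   (val >> (16 * i)) & 0xffff,
						   16 * i,
						   AARCH64_INSN_VARIANT_64BIT,
						   AARCH64_INSN_MOVEWIDE_KEEP);
}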
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		BUG_ON(1);
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
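/*
 * Illustrative sketch, not part of the original file: a plain register add
 * with no shift applied to the second operand, roughly "add x0, x1, x2".
 */
static u32 __maybe_unused example_gen_add_reg(void)
{
	return aarch64_insn_gen_add_sub_shifted_reg(AARCH64_INSN_REG_0,
						    AARCH64_INSN_REG_1,
						    AARCH64_INSN_REG_2, 0,
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
}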