/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
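
/*
 * Illustrative sketch, not part of the original header: a handler that
 * cannot emulate a trapped operation typically reflects an UNDEF into
 * the guest and resumes it. The helper name is hypothetical.
 */
static inline int example_reflect_undef(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;	/* non-zero: resume the guest */
}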

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;	/* RW=0: EL1 runs AArch32 */
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr_el2 = hcr;
}
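
/*
 * Illustrative sketch, not part of the original header: changes to the
 * shadow HCR_EL2 value are expected to go through the accessors above.
 * A hypothetical helper enabling traps on guest WFE instructions
 * (HCR_TWE is the architectural trap bit) could look like this:
 */
static inline void example_vcpu_trap_wfe(struct kvm_vcpu *vcpu)
{
	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_TWE);
}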

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}
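
/*
 * Usage sketch (illustrative, not from the original file): after
 * emulating a trapped instruction, a handler advances the guest PC past
 * it. The instruction width normally comes from the ESR_ELx.IL bit,
 * exposed further down as kvm_vcpu_trap_il_is32bit():
 *
 *	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 */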

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_reg32(vcpu, reg_num);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
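
/*
 * Illustrative sketch, not part of the original header: vcpu_reg() hides
 * the AArch32/AArch64 register banking difference, so emulation code can
 * complete a load into a destination register rt the same way for both
 * guest types. The helper name is hypothetical.
 */
static inline void example_vcpu_complete_load(struct kvm_vcpu *vcpu, u8 rt,
					      unsigned long val)
{
	*vcpu_reg(vcpu, rt) = val;
}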

/* Get vcpu SPSR for current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_spsr32(vcpu);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		/*
		 * The AArch32 mode field is five bits wide, so the
		 * four-bit PSR_MODE_MASK would truncate it; the compat
		 * mask is needed here.
		 */
		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
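
/*
 * Worked sketch (illustrative, not from the original file): HPFAR_EL2
 * reports bits [47:12] of the faulting IPA in register bits [43:4],
 * i.e. the IPA shifted right by 8; the "<< 8" above undoes that, leaving
 * the low 12 bits zero. The page-offset bits still have to come from
 * FAR_EL2, so an exact fault address could be reassembled like this:
 */
static inline phys_addr_t example_fault_ipa_exact(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_fault_ipa(vcpu) |
	       (kvm_vcpu_get_hfar(vcpu) & ((1UL << 12) - 1));
}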

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

/* Access size in bytes, decoded from the two-bit SAS field */
static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
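
/*
 * Decode sketch (illustrative, loosely modelled on an MMIO fault path;
 * all names below are hypothetical): when the ISV bit is set, the
 * syndrome fully describes the aborting access, so direction, width and
 * transfer register can be recovered without fetching the instruction.
 */
static inline int example_decode_dabt(struct kvm_vcpu *vcpu,
				      bool *is_write, int *len, int *rt)
{
	if (!kvm_vcpu_dabt_isvalid(vcpu))
		return -EINVAL;	/* no instruction syndrome to decode */

	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	*len = kvm_vcpu_dabt_get_as(vcpu);	/* access size in bytes */
	*rt = kvm_vcpu_dabt_get_rd(vcpu);	/* transfer register */
	return 0;
}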

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
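
/*
 * Dispatch sketch (illustrative, not from the original file): an abort
 * handler typically switches on the fault status type, resolving
 * translation faults by establishing a mapping and permission faults via
 * dirty logging or copy-on-write. FSC_FAULT comes from asm/kvm_arm.h.
 */
static inline bool example_fault_is_translation(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT;
}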

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);	/* SCTLR_EL1.EE */
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}
}
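
/*
 * Usage sketch (illustrative, not from the original file; the helper
 * name is hypothetical): completing an emulated MMIO read. Device
 * emulation produces data in host byte order, which must be converted
 * to the guest's current endianness before it lands in the destination
 * register.
 */
static inline void example_complete_mmio_read(struct kvm_vcpu *vcpu, u8 rt,
					      unsigned long host_data,
					      unsigned int len)
{
	*vcpu_reg(vcpu, rt) = vcpu_data_host_to_guest(vcpu, host_data, len);
}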

#endif /* __ARM64_KVM_EMULATE_H__ */