/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>

#include "trace.h"
#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5

#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};
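
/*
 * Illustrative sketch, not part of the original file: the table above
 * flattens struct kvm_regs into u32-sized slots, so looking up a banked
 * register is a simple base-plus-offset computation.  The helper name
 * __example_fiq_r8 is hypothetical and only demonstrates the table lookup.
 */
static inline unsigned long *__example_fiq_r8(struct kvm_vcpu *vcpu)
{
	unsigned long *base = (unsigned long *)&vcpu->arch.ctxt.gp_regs;

	/* Row FIQ, column 8: resolves to &gp_regs.fiq_regs[0], i.e. r8_fiq */
	return base + vcpu_reg_offsets[VCPU_REG_OFFSET_FIQ][8];
}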
/*
 * Return a pointer to the register identified by reg_num, banked according
 * to the current mode of the virtual CPU.
 */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE ... SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
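
/*
 * Usage sketch, not part of the original file: an emulation path that has
 * decoded a load instruction could complete it by writing the fetched data
 * through vcpu_reg(), without caring which mode the guest trapped from.
 * The helper name __example_complete_load is hypothetical.
 */
static inline void __example_complete_load(struct kvm_vcpu *vcpu, u8 rt,
					   unsigned long data)
{
	/* vcpu_reg() selects the correct banked copy of rt for this mode */
	*vcpu_reg(vcpu, rt) = data;
}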
/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}
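
/*
 * Usage sketch, not part of the original file: emulated exception entry
 * banks the old CPSR into the new mode's SPSR, which is what __vcpu_spsr()
 * exposes once the mode bits have been rewritten.  This is deliberately
 * simplified; real exception entry must also mask interrupts, handle the
 * Thumb state and load the vector PC.  The helper name
 * __example_enter_abt_mode is hypothetical.
 */
static inline void __example_enter_abt_mode(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);

	/* Switch to ABT mode first so __vcpu_spsr() selects SPSR_abt */
	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
	*__vcpu_spsr(vcpu) = cpsr;
}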
/******************************************************************************
 * Inject exceptions into the guest
 */

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	*vcpu_hcr(vcpu) |= HCR_VA;
}
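
/*
 * Usage sketch, not part of the original file: a caller that cannot resolve
 * a guest access might report the failure back to the guest as an
 * asynchronous abort instead of killing the VM, e.g.:
 *
 *	if (emulation_failed)
 *		kvm_inject_vabt(vcpu);
 *
 * Setting HCR_VA merely arms a virtual abort; the hardware delivers it the
 * next time the guest runs, so no synchronous guest state is touched here.
 */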