/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
#include <trace/events/kvm.h>

#include "trace.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
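
/*
 * An HVC from the guest is routed to the in-kernel PSCI emulation.
 * A negative return value means the call was not recognised, in which
 * case an undefined instruction exception is injected into the guest.
 */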
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
		      kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_psci_call(vcpu);
	if (ret < 0) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	return ret;
}
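
/*
 * An SMC from the guest is not forwarded anywhere; treat it as an
 * undefined instruction and inject the corresponding exception.
 */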
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle WFI or WFE instructions trapped in guests
 * @vcpu:	the vcpu pointer
 * @run:	the kvm_run structure pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
		trace_kvm_wfx(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu);
	} else {
		trace_kvm_wfx(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
	}

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 1;
}
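
/*
 * Exit handlers, indexed by the exception class (EC) field of the HSR.
 */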
static exit_handle_fn arm_exit_handlers[] = {
	[HSR_EC_WFI]		= kvm_handle_wfx,
	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
	[HSR_EC_HVC]		= handle_hvc,
	[HSR_EC_SMC]		= handle_smc,
	[HSR_EC_IABT]		= kvm_handle_guest_abort,
	[HSR_EC_DABT]		= kvm_handle_guest_abort,
};
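
/*
 * Look up the handler for the trapped exception class; an unknown
 * class indicates a KVM bug.
 */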
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);

	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
	    !arm_exit_handlers[hsr_ec]) {
		kvm_err("Unknown exception class: hsr: %#08x\n",
			(unsigned int)kvm_vcpu_get_hsr(vcpu));
		BUG();
	}

	return arm_exit_handlers[hsr_ec];
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index)
{
	exit_handle_fn exit_handler;
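
	/*
	 * The exit code carries a pending abort: inject it into the
	 * guest as a virtual abort and resume the guest.
	 */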
	if (ARM_ABORT_PENDING(exception_index)) {
		u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC/SMC already have an adjusted PC, which we need
		 * to correct in order to return to the right instruction
		 * after having injected the abort.
		 */
		if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
			*vcpu_pc(vcpu) -= adj;
		}

		kvm_inject_vabt(vcpu);
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_HVC:
		/*
		 * See ARM ARM B1.14.1: "Hyp traps on instructions
		 * that fail their condition code check"
		 */
		if (!kvm_condition_valid(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}

		exit_handler = kvm_get_exit_handler(vcpu);

		return exit_handler(vcpu, run);
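	/* Report the data abort back to the guest as a virtual abort. */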
	case ARM_EXCEPTION_DATA_ABORT:
		kvm_inject_vabt(vcpu);
		return 1;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}