/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
# include <linux/kvm.h>
# include <linux/kvm_host.h>
2014-11-24 16:59:30 +03:00
# include <asm/esr.h>
2015-10-25 22:57:11 +03:00
# include <asm/kvm_asm.h>
2012-12-10 20:40:41 +04:00
# include <asm/kvm_coproc.h>
2014-11-24 16:59:30 +03:00
# include <asm/kvm_emulate.h>
2012-12-10 20:40:41 +04:00
# include <asm/kvm_mmu.h>
2012-12-12 22:52:05 +04:00
# include <asm/kvm_psci.h>
2012-12-10 20:40:41 +04:00
2015-01-12 19:53:36 +03:00
# define CREATE_TRACE_POINTS
# include "trace.h"
2012-12-10 20:40:41 +04:00
typedef int ( * exit_handle_fn ) ( struct kvm_vcpu * , struct kvm_run * ) ;
static int handle_hvc ( struct kvm_vcpu * vcpu , struct kvm_run * run )
{
2014-04-29 09:54:18 +04:00
int ret ;
2015-12-04 15:03:14 +03:00
trace_kvm_hvc_arm64 ( * vcpu_pc ( vcpu ) , vcpu_get_reg ( vcpu , 0 ) ,
2015-01-12 19:53:36 +03:00
kvm_vcpu_hvc_get_imm ( vcpu ) ) ;
2015-11-26 13:09:43 +03:00
vcpu - > stat . hvc_exit_stat + + ;
2015-01-12 19:53:36 +03:00
2014-04-29 09:54:18 +04:00
ret = kvm_psci_call ( vcpu ) ;
if ( ret < 0 ) {
kvm_inject_undefined ( vcpu ) ;
2012-12-12 22:52:05 +04:00
return 1 ;
2014-04-29 09:54:18 +04:00
}
2012-12-12 22:52:05 +04:00
2014-04-29 09:54:18 +04:00
return ret ;
2012-12-10 20:40:41 +04:00
}
/*
 * Guest executed an SMC instruction. SMCs from a guest are not
 * supported: inject an UNDEF into the guest and resume it.
 */
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
/**
2013-08-02 14:41:13 +04:00
* kvm_handle_wfx - handle a wait - for - interrupts or wait - for - event
* instruction executed by a guest
*
2012-12-10 20:40:41 +04:00
* @ vcpu : the vcpu pointer
*
2013-08-02 14:41:13 +04:00
* WFE : Yield the CPU and come back to this vcpu when the scheduler
* decides to .
* WFI : Simply call kvm_vcpu_block ( ) , which will halt execution of
2012-12-10 20:40:41 +04:00
* world - switches and schedule other host processes until there is an
* incoming IRQ or FIQ to the VM .
*/
2013-08-02 14:41:13 +04:00
static int kvm_handle_wfx ( struct kvm_vcpu * vcpu , struct kvm_run * run )
2012-12-10 20:40:41 +04:00
{
2015-01-23 15:39:51 +03:00
if ( kvm_vcpu_get_hsr ( vcpu ) & ESR_ELx_WFx_ISS_WFE ) {
2015-01-12 19:53:36 +03:00
trace_kvm_wfx_arm64 ( * vcpu_pc ( vcpu ) , true ) ;
2015-11-26 13:09:43 +03:00
vcpu - > stat . wfe_exit_stat + + ;
2013-08-02 14:41:13 +04:00
kvm_vcpu_on_spin ( vcpu ) ;
2015-01-12 19:53:36 +03:00
} else {
trace_kvm_wfx_arm64 ( * vcpu_pc ( vcpu ) , false ) ;
2015-11-26 13:09:43 +03:00
vcpu - > stat . wfi_exit_stat + + ;
2013-08-02 14:41:13 +04:00
kvm_vcpu_block ( vcpu ) ;
2015-01-12 19:53:36 +03:00
}
2013-08-02 14:41:13 +04:00
2014-08-26 16:33:02 +04:00
kvm_skip_instr ( vcpu , kvm_vcpu_trap_il_is32bit ( vcpu ) ) ;
2012-12-10 20:40:41 +04:00
return 1 ;
}
2015-07-07 19:29:57 +03:00
/**
* kvm_handle_guest_debug - handle a debug exception instruction
*
* @ vcpu : the vcpu pointer
* @ run : access to the kvm_run structure for results
*
* We route all debug exceptions through the same handler . If both the
* guest and host are using the same debug facilities it will be up to
* userspace to re - inject the correct exception for guest delivery .
*
* @ return : 0 ( while setting run - > exit_reason ) , - 1 for error
*/
static int kvm_handle_guest_debug ( struct kvm_vcpu * vcpu , struct kvm_run * run )
{
u32 hsr = kvm_vcpu_get_hsr ( vcpu ) ;
int ret = 0 ;
run - > exit_reason = KVM_EXIT_DEBUG ;
run - > debug . arch . hsr = hsr ;
2016-05-31 14:33:02 +03:00
switch ( ESR_ELx_EC ( hsr ) ) {
2015-07-07 19:30:02 +03:00
case ESR_ELx_EC_WATCHPT_LOW :
run - > debug . arch . far = vcpu - > arch . fault . far_el2 ;
/* fall through */
2015-07-07 19:29:58 +03:00
case ESR_ELx_EC_SOFTSTP_LOW :
2015-07-07 19:30:02 +03:00
case ESR_ELx_EC_BREAKPT_LOW :
2015-07-07 19:29:57 +03:00
case ESR_ELx_EC_BKPT32 :
case ESR_ELx_EC_BRK64 :
break ;
default :
kvm_err ( " %s: un-handled case hsr: %#08x \n " ,
__func__ , ( unsigned int ) hsr ) ;
ret = - 1 ;
break ;
}
return ret ;
}
2012-12-10 20:40:41 +04:00
static exit_handle_fn arm_exit_handlers [ ] = {
2014-11-24 16:59:30 +03:00
[ ESR_ELx_EC_WFx ] = kvm_handle_wfx ,
[ ESR_ELx_EC_CP15_32 ] = kvm_handle_cp15_32 ,
[ ESR_ELx_EC_CP15_64 ] = kvm_handle_cp15_64 ,
[ ESR_ELx_EC_CP14_MR ] = kvm_handle_cp14_32 ,
[ ESR_ELx_EC_CP14_LS ] = kvm_handle_cp14_load_store ,
[ ESR_ELx_EC_CP14_64 ] = kvm_handle_cp14_64 ,
[ ESR_ELx_EC_HVC32 ] = handle_hvc ,
[ ESR_ELx_EC_SMC32 ] = handle_smc ,
[ ESR_ELx_EC_HVC64 ] = handle_hvc ,
[ ESR_ELx_EC_SMC64 ] = handle_smc ,
[ ESR_ELx_EC_SYS64 ] = kvm_handle_sys_reg ,
[ ESR_ELx_EC_IABT_LOW ] = kvm_handle_guest_abort ,
[ ESR_ELx_EC_DABT_LOW ] = kvm_handle_guest_abort ,
2015-07-07 19:29:58 +03:00
[ ESR_ELx_EC_SOFTSTP_LOW ] = kvm_handle_guest_debug ,
2015-07-07 19:30:02 +03:00
[ ESR_ELx_EC_WATCHPT_LOW ] = kvm_handle_guest_debug ,
[ ESR_ELx_EC_BREAKPT_LOW ] = kvm_handle_guest_debug ,
2015-07-07 19:29:57 +03:00
[ ESR_ELx_EC_BKPT32 ] = kvm_handle_guest_debug ,
[ ESR_ELx_EC_BRK64 ] = kvm_handle_guest_debug ,
2012-12-10 20:40:41 +04:00
} ;
static exit_handle_fn kvm_get_exit_handler ( struct kvm_vcpu * vcpu )
{
2015-01-07 14:26:18 +03:00
u32 hsr = kvm_vcpu_get_hsr ( vcpu ) ;
2016-05-31 14:33:02 +03:00
u8 hsr_ec = ESR_ELx_EC ( hsr ) ;
2012-12-10 20:40:41 +04:00
if ( hsr_ec > = ARRAY_SIZE ( arm_exit_handlers ) | |
! arm_exit_handlers [ hsr_ec ] ) {
2015-01-07 14:26:18 +03:00
kvm_err ( " Unknown exception class: hsr: %#08x -- %s \n " ,
hsr , esr_get_class_string ( hsr ) ) ;
2012-12-10 20:40:41 +04:00
BUG ( ) ;
}
return arm_exit_handlers [ hsr_ec ] ;
}
/*
* Return > 0 to return to guest , < 0 on error , 0 ( and set exit_reason ) on
* proper exit to userspace .
*/
int handle_exit ( struct kvm_vcpu * vcpu , struct kvm_run * run ,
int exception_index )
{
exit_handle_fn exit_handler ;
2016-09-06 16:02:06 +03:00
if ( ARM_SERROR_PENDING ( exception_index ) ) {
u8 hsr_ec = ESR_ELx_EC ( kvm_vcpu_get_hsr ( vcpu ) ) ;
/*
* HVC / SMC already have an adjusted PC , which we need
* to correct in order to return to after having
* injected the SError .
*/
if ( hsr_ec = = ESR_ELx_EC_HVC32 | | hsr_ec = = ESR_ELx_EC_HVC64 | |
hsr_ec = = ESR_ELx_EC_SMC32 | | hsr_ec = = ESR_ELx_EC_SMC64 ) {
u32 adj = kvm_vcpu_trap_il_is32bit ( vcpu ) ? 4 : 2 ;
* vcpu_pc ( vcpu ) - = adj ;
}
kvm_inject_vabt ( vcpu ) ;
return 1 ;
}
exception_index = ARM_EXCEPTION_CODE ( exception_index ) ;
2012-12-10 20:40:41 +04:00
switch ( exception_index ) {
case ARM_EXCEPTION_IRQ :
return 1 ;
2016-09-06 16:02:03 +03:00
case ARM_EXCEPTION_EL1_SERROR :
kvm_inject_vabt ( vcpu ) ;
return 1 ;
2012-12-10 20:40:41 +04:00
case ARM_EXCEPTION_TRAP :
/*
* See ARM ARM B1 .14 .1 : " Hyp traps on instructions
* that fail their condition code check "
*/
if ( ! kvm_condition_valid ( vcpu ) ) {
kvm_skip_instr ( vcpu , kvm_vcpu_trap_il_is32bit ( vcpu ) ) ;
return 1 ;
}
exit_handler = kvm_get_exit_handler ( vcpu ) ;
return exit_handler ( vcpu , run ) ;
2016-04-27 19:47:04 +03:00
case ARM_EXCEPTION_HYP_GONE :
/*
* EL2 has been reset to the hyp - stub . This happens when a guest
* is pre - empted by kvm_reboot ( ) ' s shutdown call .
*/
run - > exit_reason = KVM_EXIT_FAIL_ENTRY ;
return 0 ;
2012-12-10 20:40:41 +04:00
default :
kvm_pr_unimpl ( " Unsupported exception type: %d " ,
exception_index ) ;
run - > exit_reason = KVM_EXIT_INTERNAL_ERROR ;
return 0 ;
}
}