/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>

#include <asm/mmu_context.h>

#include "interrupt.h"

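/*
 * Translate a guest virtual address to a guest physical address. Under
 * trap-and-emulate, only the unmapped segments translate directly here:
 * CKSEG0/CKSEG1 map linearly onto physical memory, so CPHYSADDR() simply
 * strips the segment bits. Anything else has no direct translation at this
 * point and is reported as KVM_INVALID_ADDR.
 */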
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        gva_t kseg = KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else {
                kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

        return gpa;
}
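
/*
 * Handle a Coprocessor Unusable exception taken from the guest. The CE field
 * of CP0_Cause identifies the coprocessor: CE == 1 means the FPU, which is
 * either delivered to the guest as a COP1 Unusable exception (no FPU
 * configured, or guest CU1 clear) or satisfied by restoring the real FPU
 * state. CE == 0 typically means a privileged (COP0) instruction trapped
 * from user mode and must be emulated on the guest's behalf.
 */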
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
                /* FPU Unusable */
                if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
                    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
                        /*
                         * Unusable/no FPU in guest:
                         * deliver guest COP1 Unusable Exception
                         */
                        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
                } else {
                        /* Restore FPU state */
                        kvm_own_fpu(vcpu);
                        er = EMULATE_DONE;
                }
        } else {
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}
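
/*
 * Handle a TLB Modified exception, i.e. a guest store to a page whose dirty
 * (D) bit is clear. Faults on guest user or KSEG23 addresses are passed to
 * kvm_mips_handle_tlbmod(); faults anywhere else are unexpected and are
 * reported to userland as KVM_EXIT_INTERNAL_ERROR.
 */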
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);
                er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * XXXKYMA: The guest kernel does not expect to get this fault
                 * when we are not using HIGHMEM. Need to address this in a
                 * HIGHMEM kernel
                 */
                kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        } else {
                kvm_err("Illegal TLB Mod fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
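
/*
 * Common handler for TLB load/store miss exceptions; @store selects which
 * flavour is being handled (see the two thin wrappers below). The faulting
 * address decides the strategy: commpage faults are wired straight into the
 * host TLB, guest user/KSEG23 faults go via the guest TLB, guest KSEG0
 * faults are mapped by KVM itself, and kernel-mode CKSEG0/1 faults are
 * treated as MMIO.
 */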
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          store ? "ST" : "LD", cause, opc, badvaddr);

                /*
                 * User Address (UA) fault, this could happen if
                 * (1) TLB entry not present/valid in both Guest and shadow host
                 *     TLBs, in this case we pass on the fault to the guest
                 *     kernel and let it handle it.
                 * (2) TLB entry is present in the Guest TLB but not in the
                 *     shadow, in this case we inject the TLB from the Guest TLB
                 *     into the shadow host TLB
                 */
                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel does
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault
                    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KERNEL_MODE(vcpu)
                   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                /*
                 * With EVA we may get a TLB exception instead of an address
                 * error when the guest performs MMIO to KSeg1 addresses.
                 */
                kvm_debug("Emulate %s MMIO space\n",
                          store ? "Store to" : "Load from");
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate %s MMIO space failed\n",
                                store ? "Store to" : "Load from");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        store ? "ST" : "LD", cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}
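
/*
 * An Address Error on a store taken in guest kernel mode against an unmapped
 * CKSEG0/1 address is how MMIO writes normally surface under
 * trap-and-emulate: the access is emulated and then completed by userland
 * via KVM_EXIT_MMIO. Any other address error is unexpected and fatal to the
 * guest.
 */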
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                kvm_debug("Emulate Store to MMIO space\n");
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate Store to MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
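
/*
 * Load counterpart of the store handler above: emulate MMIO reads from
 * CKSEG0/1 and fail hard on anything else.
 */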
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
                kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate Load from MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                er = EMULATE_FAIL;
        }
        return ret;
}
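
/*
 * The next several handlers share one shape: re-deliver the trapping
 * exception (syscall, reserved instruction, breakpoint, trap, MSA floating
 * point, FPE) into the guest's exception context and resume the guest,
 * exiting to userland only if that emulation fails.
 */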
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
            (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
                /*
                 * No MSA in guest, or FPU enabled and not in FR=1 mode,
                 * guest reserved instruction exception
                 */
                er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
        } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
                /* MSA disabled by guest, guest MSA disabled exception */
                er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
        } else {
                /* Restore MSA/FPU state */
                kvm_own_msa(vcpu);
                er = EMULATE_DONE;
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}
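
/*
 * No VM-wide state is needed for trap-and-emulate. At vcpu init the guest is
 * granted use of the KScratch registers: 0xfc is a bitmask with bits 2-7
 * set, which appears to correspond to CP0_KScratch selects 2-7 in the style
 * of Config4.KScrExist.
 */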
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
        return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.kscratch_enabled = 0xfc;

        return 0;
}
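
/*
 * Initialise the guest's view of the CP0 config registers so that it comes
 * up looking like a plausible real CPU: a 24Kc on pre-R6 hosts, QEMU's
 * generic CPU on R6. Cache characteristics, endianness and arch revision
 * are inherited from the host, since the guest runs on the real caches.
 */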
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 config, config1;
        int vcpu_id = vcpu->vcpu_id;

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected
         */
#ifndef CONFIG_CPU_MIPSR6
        /* r2-r5, simulate a MIPS 24kc */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
        /* r6+, simulate a generic QEMU machine */
        kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
        /*
         * Have config1, Cacheable, noncoherent, write-back, write allocate.
         * Endianness, arch revision & virtually tagged icache should match
         * host.
         */
        config = read_c0_config() & MIPS_CONF_AR;
        config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
        config |= CONF_BE;
#endif
        if (cpu_has_vtag_icache)
                config |= MIPS_CONF_VI;
        kvm_write_c0_guest_config(cop0, config);

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
                     MIPS_CONF1_WR | MIPS_CONF1_CA);
        kvm_write_c0_guest_config1(cop0, config1);

        /* Have config3, no tertiary/secondary caches implemented */
        kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
        /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

        /* Have config4, UserLocal */
        kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

        /* Have config5 */
        kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

        /* No config6 */
        kvm_write_c0_guest_config5(cop0, 0);

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /*
         * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
                                       (vcpu_id & MIPS_EBASE_CPUNUM));

        return 0;
}
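
/*
 * Trap-and-emulate exposes no extra register indices beyond the common MIPS
 * set, so both of these are trivially empty.
 */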
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
        return 0;
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
                                          u64 __user *indices)
{
        return 0;
}
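
/*
 * Only the timer-related registers need special handling on read: CP0_Count
 * is derived from the emulated timer rather than stored directly, and the
 * COUNT_CTL/COUNT_RESUME/COUNT_HZ pseudo-registers expose its configuration.
 */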
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
{
        switch (reg->id) {
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
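
/*
 * Writes are likewise mostly about the emulated timer. A change to
 * CP0_Cause.DC must stop or start the timer in the right order relative to
 * the interrupt pending bits (TI, IRQ5), and Config1-5 writes are filtered
 * through per-register writable-bit masks so userland cannot turn on
 * features KVM does not emulate.
 */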
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret = 0;
        unsigned int cur, change;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v, false);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the interrupt pending bits (TI, IRQ5).
                 * A timer interrupt should not happen in between.
                 */
                if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                        } else {
                                /* enable timer last */
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        kvm_write_c0_guest_cause(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                cur = kvm_read_c0_guest_config1(cop0);
                change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config1(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                cur = kvm_read_c0_guest_config3(cop0);
                change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config3(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                cur = kvm_read_c0_guest_config4(cop0);
                change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config4(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                cur = kvm_read_c0_guest_config5(cop0);
                change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config5(cop0, v);
                }
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        default:
                return -EINVAL;
        }
        return ret;
}
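
/*
 * On vcpu_load, refresh the guest kernel and guest user ASIDs if the host's
 * ASID version has moved on since this vcpu last ran here, and if we were
 * preempted while in guest context restore the EntryHi ASID appropriate to
 * the guest's current mode.
 */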
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);

        /* Allocate new kernel and user ASIDs if needed */
        if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                        vcpu->arch.guest_kernel_mm.context.asid[cpu];

                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
                kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                          cpu, vcpu->arch.guest_kernel_asid[cpu]);
        }

        if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                        vcpu->arch.guest_user_mm.context.asid[cpu];

                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
                kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                          vcpu->arch.guest_user_asid[cpu]);
        }

        /*
         * Were we in guest context? If so then the pre-empted ASID is
         * no longer valid, we need to set it to what it should be based
         * on the mode of the Guest (Kernel/User)
         */
        if (current->flags & PF_VCPU) {
                if (KVM_GUEST_KERNEL_MODE(vcpu))
                        write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
                                         asid_mask);
                else
                        write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
                                         asid_mask);
                ehb();
        }

        return 0;
}
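
/*
 * On vcpu_put, give up the FPU/MSA state and restore the host mm's ASID in
 * EntryHi, dropping (and thereby regenerating) the host MMU context first
 * if its ASID version has expired.
 */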
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_lose_fpu(vcpu);

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             asid_version_mask(cpu))) {
                kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        ehb();

        return 0;
}
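
/*
 * This ops table glues the trap-and-emulate implementation into the generic
 * MIPS KVM core; interrupt queueing and delivery are shared with the common
 * interrupt code rather than implemented per-backend.
 */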
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,
        .handle_trap = kvm_trap_emul_handle_trap,
        .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
        .handle_fpe = kvm_trap_emul_handle_fpe,
        .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

        .vm_init = kvm_trap_emul_vm_init,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .num_regs = kvm_trap_emul_num_regs,
        .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
        .vcpu_load = kvm_trap_emul_vcpu_load,
        .vcpu_put = kvm_trap_emul_vcpu_put,
};
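
/* Called by the MIPS KVM core at init time to install this backend. */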
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}