/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

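/* Instructions with primary opcode 31 are decoded further by the extended
 * opcode in bits 21-30 of the instruction; the values below are those
 * extended opcodes. */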
#define OP_31_XOP_LWZX    23
#define OP_31_XOP_LBZX    87
#define OP_31_XOP_STWX    151
#define OP_31_XOP_STBX    215
#define OP_31_XOP_STBUX   247
#define OP_31_XOP_LHZX    279
#define OP_31_XOP_LHZUX   311
#define OP_31_XOP_MFSPR   339
#define OP_31_XOP_STHX    407
#define OP_31_XOP_STHUX   439
#define OP_31_XOP_MTSPR   467
#define OP_31_XOP_DCBI    470
#define OP_31_XOP_LWBRX   534
#define OP_31_XOP_TLBSYNC 566
#define OP_31_XOP_STWBRX  662
#define OP_31_XOP_LHBRX   790
#define OP_31_XOP_STHBRX  918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_STH  44
#define OP_STHU 45
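
/* On 64-bit (Book3S) hosts the decrementer always runs; on BookE it is
 * gated by the DIE bit in the guest's Timer Control Register. */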
#ifdef CONFIG_PPC64
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return 1;
}
#else
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.tcr & TCR_DIE;
}
#endif

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC64
	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif
	if (kvmppc_dec_enabled(vcpu)) {
		/* The decrementer ticks at the same rate as the timebase, so
		 * that's how we convert the guest DEC value to the number of
		 * host ticks. */
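		/* DEC counts timebase ticks, so scaling by
		 * 1000 / tb_ticks_per_usec converts it to the nanoseconds
		 * that the hrtimer expects. */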
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		dec_nsec = vcpu->arch.dec;
		dec_nsec *= 1000;
		dec_nsec /= tb_ticks_per_usec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
		vcpu->arch.dec_jiffies = get_tb();
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
	}
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */

/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = vcpu->arch.last_inst;
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC64
	case OP_TRAP_64:
#else
		vcpu->arch.esr |= ESR_PTR;
#endif
		kvmppc_core_queue_program(vcpu);
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       1, 1);
			break;
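
		/* stbux is an update form: after the store completes, the
		 * computed effective address is written back into RA. */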
		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       1, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
			case SPRN_SRR1:
				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
			case SPRN_PVR:
				vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
			case SPRN_PIR:
				vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
			case SPRN_MSSSR0:
				vcpu->arch.gpr[rt] = 0; break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				vcpu->arch.gpr[rt] = get_tb() >> 32; break;
			case SPRN_TBWU:
				vcpu->arch.gpr[rt] = get_tb(); break;

			case SPRN_SPRG0:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
			case SPRN_SPRG1:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
			case SPRN_SPRG2:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
			case SPRN_SPRG3:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */
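
			/* Reading DEC returns the value programmed at the last
			 * mtDEC minus the timebase ticks elapsed since then. */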
			case SPRN_DEC:
			{
				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
				vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
				pr_debug("mfDEC: %x - %llx = %lx\n",
					 vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
				break;
			}
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk("mfspr: unknown spr %x\n", sprn);
					vcpu->arch.gpr[rt] = 0;
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = vcpu->arch.gpr[rs];
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk("mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;
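
		/* The byte-reversed forms (lwbrx, stwbrx, lhbrx, sthbrx) pass
		 * 0 as the is_bigendian argument so the access is completed
		 * with the byte order reversed. */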
		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;
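
	/* D-form loads and stores: the faulting access was recorded when it
	 * trapped, so only the target register, access size and the RA
	 * writeback for the update forms are handled here. */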
	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	default:
		emulated = EMULATE_FAIL;
	}
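
	/* If the generic decode above did not handle the instruction, give
	 * the core-specific emulation a chance before giving up. */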
	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);

	if (advance)
		vcpu->arch.pc += 4; /* Advance past emulated instruction. */

	return emulated;
}