/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>

#include "timing.h"
#include "trace.h"
/* Primary opcodes and opcode-31 extended opcodes decoded below. */
#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45
2010-04-16 00:11:42 +02:00
# ifdef CONFIG_PPC_BOOK3S
2009-10-30 05:47:16 +00:00
static int kvmppc_dec_enabled ( struct kvm_vcpu * vcpu )
{
return 1 ;
}
# else
static int kvmppc_dec_enabled ( struct kvm_vcpu * vcpu )
{
return vcpu - > arch . tcr & TCR_DIE ;
}
# endif
2008-11-05 09:36:16 -06:00
void kvmppc_emulate_dec ( struct kvm_vcpu * vcpu )
2008-04-16 23:28:09 -05:00
{
2009-11-02 12:02:31 +00:00
unsigned long dec_nsec ;
2009-10-30 05:47:15 +00:00
2009-11-02 12:02:31 +00:00
pr_debug ( " mtDEC: %x \n " , vcpu - > arch . dec ) ;
2010-04-16 00:11:42 +02:00
# ifdef CONFIG_PPC_BOOK3S
2009-12-21 20:21:24 +01:00
/* mtdec lowers the interrupt line when positive. */
kvmppc_core_dequeue_dec ( vcpu ) ;
2009-10-30 05:47:16 +00:00
/* POWER4+ triggers a dec interrupt if the value is < 0 */
if ( vcpu - > arch . dec & 0x80000000 ) {
2009-11-02 12:02:31 +00:00
hrtimer_try_to_cancel ( & vcpu - > arch . dec_timer ) ;
2009-10-30 05:47:16 +00:00
kvmppc_core_queue_dec ( vcpu ) ;
return ;
}
# endif
if ( kvmppc_dec_enabled ( vcpu ) ) {
2008-04-16 23:28:09 -05:00
/* The decrementer ticks at the same rate as the timebase, so
* that ' s how we convert the guest DEC value to the number of
* host ticks . */
2009-11-02 12:02:31 +00:00
hrtimer_try_to_cancel ( & vcpu - > arch . dec_timer ) ;
dec_nsec = vcpu - > arch . dec ;
dec_nsec * = 1000 ;
dec_nsec / = tb_ticks_per_usec ;
hrtimer_start ( & vcpu - > arch . dec_timer , ktime_set ( 0 , dec_nsec ) ,
HRTIMER_MODE_REL ) ;
2009-10-30 05:47:16 +00:00
vcpu - > arch . dec_jiffies = get_tb ( ) ;
2008-04-16 23:28:09 -05:00
} else {
2009-11-02 12:02:31 +00:00
hrtimer_try_to_cancel ( & vcpu - > arch . dec_timer ) ;
2008-04-16 23:28:09 -05:00
}
}
2011-04-27 17:24:21 -05:00
u32 kvmppc_get_dec ( struct kvm_vcpu * vcpu , u64 tb )
{
u64 jd = tb - vcpu - > arch . dec_jiffies ;
return vcpu - > arch . dec - jd ;
}
/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
2008-04-16 23:28:09 -05:00
int kvmppc_emulate_instruction ( struct kvm_run * run , struct kvm_vcpu * vcpu )
{
2010-04-16 00:11:40 +02:00
u32 inst = kvmppc_get_last_inst ( vcpu ) ;
2008-04-16 23:28:09 -05:00
u32 ea ;
int ra ;
int rb ;
int rs ;
int rt ;
int sprn ;
enum emulation_result emulated = EMULATE_DONE ;
int advance = 1 ;
2008-12-02 15:51:57 -06:00
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type ( vcpu , EMULATED_INST_EXITS ) ;
2010-09-11 19:10:53 +00:00
pr_debug ( " Emulating opcode %d / %d \n " , get_op ( inst ) , get_xop ( inst ) ) ;
2009-10-30 05:47:16 +00:00
2008-04-16 23:28:09 -05:00
switch ( get_op ( inst ) ) {
2009-01-03 16:23:05 -06:00
case OP_TRAP :
2010-04-16 00:11:42 +02:00
# ifdef CONFIG_PPC_BOOK3S
2009-10-30 05:47:16 +00:00
case OP_TRAP_64 :
2010-02-02 19:44:35 +08:00
kvmppc_core_queue_program ( vcpu , SRR1_PROGTRAP ) ;
2009-10-30 05:47:16 +00:00
# else
2010-02-02 19:44:35 +08:00
kvmppc_core_queue_program ( vcpu , vcpu - > arch . esr | ESR_PTR ) ;
2009-10-30 05:47:16 +00:00
# endif
2008-04-16 23:28:09 -05:00
advance = 0 ;
break ;
case 31 :
switch ( get_xop ( inst ) ) {
2009-01-03 16:23:05 -06:00
case OP_31_XOP_LWZX :
2008-05-21 18:22:52 -05:00
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 4 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_LBZX :
2008-04-16 23:28:09 -05:00
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 1 , 1 ) ;
break ;
2010-03-24 21:48:27 +01:00
case OP_31_XOP_LBZUX :
rt = get_rt ( inst ) ;
ra = get_ra ( inst ) ;
rb = get_rb ( inst ) ;
ea = kvmppc_get_gpr ( vcpu , rb ) ;
if ( ra )
ea + = kvmppc_get_gpr ( vcpu , ra ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 1 , 1 ) ;
kvmppc_set_gpr ( vcpu , ra , ea ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_STWX :
2008-05-21 18:22:52 -05:00
rs = get_rs ( inst ) ;
emulated = kvmppc_handle_store ( run , vcpu ,
2010-01-08 02:58:01 +01:00
kvmppc_get_gpr ( vcpu , rs ) ,
2008-05-21 18:22:52 -05:00
4 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_STBX :
2008-04-16 23:28:09 -05:00
rs = get_rs ( inst ) ;
emulated = kvmppc_handle_store ( run , vcpu ,
2010-01-08 02:58:01 +01:00
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
1 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_STBUX :
2008-04-16 23:28:09 -05:00
rs = get_rs ( inst ) ;
ra = get_ra ( inst ) ;
rb = get_rb ( inst ) ;
2010-01-08 02:58:01 +01:00
ea = kvmppc_get_gpr ( vcpu , rb ) ;
2008-04-16 23:28:09 -05:00
if ( ra )
2010-01-08 02:58:01 +01:00
ea + = kvmppc_get_gpr ( vcpu , ra ) ;
2008-04-16 23:28:09 -05:00
emulated = kvmppc_handle_store ( run , vcpu ,
2010-01-08 02:58:01 +01:00
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
1 , 1 ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , rs , ea ) ;
2008-04-16 23:28:09 -05:00
break ;
2010-03-24 21:48:27 +01:00
case OP_31_XOP_LHAX :
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_loads ( run , vcpu , rt , 2 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_LHZX :
2008-04-16 23:28:09 -05:00
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 2 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_LHZUX :
2008-04-16 23:28:09 -05:00
rt = get_rt ( inst ) ;
ra = get_ra ( inst ) ;
rb = get_rb ( inst ) ;
2010-01-08 02:58:01 +01:00
ea = kvmppc_get_gpr ( vcpu , rb ) ;
2008-04-16 23:28:09 -05:00
if ( ra )
2010-01-08 02:58:01 +01:00
ea + = kvmppc_get_gpr ( vcpu , ra ) ;
2008-04-16 23:28:09 -05:00
emulated = kvmppc_handle_load ( run , vcpu , rt , 2 , 1 ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , ra , ea ) ;
2008-04-16 23:28:09 -05:00
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_MFSPR :
2008-04-16 23:28:09 -05:00
sprn = get_sprn ( inst ) ;
rt = get_rt ( inst ) ;
switch ( sprn ) {
case SPRN_SRR0 :
2010-07-29 14:47:46 +02:00
kvmppc_set_gpr ( vcpu , rt , vcpu - > arch . shared - > srr0 ) ;
break ;
2008-04-16 23:28:09 -05:00
case SPRN_SRR1 :
2010-07-29 14:47:46 +02:00
kvmppc_set_gpr ( vcpu , rt , vcpu - > arch . shared - > srr1 ) ;
break ;
2008-04-16 23:28:09 -05:00
case SPRN_PVR :
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , rt , vcpu - > arch . pvr ) ; break ;
2009-06-05 14:54:31 +08:00
case SPRN_PIR :
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , rt , vcpu - > vcpu_id ) ; break ;
2009-10-30 05:47:16 +00:00
case SPRN_MSSSR0 :
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , rt , 0 ) ; break ;
2008-04-16 23:28:09 -05:00
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyways .
* In fact , we probably will never see these traps . */
case SPRN_TBWL :
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , rt , get_tb ( ) > > 32 ) ; break ;
2008-04-16 23:28:09 -05:00
case SPRN_TBWU :
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , rt , get_tb ( ) ) ; break ;
2008-04-16 23:28:09 -05:00
case SPRN_SPRG0 :
2010-07-29 14:47:47 +02:00
kvmppc_set_gpr ( vcpu , rt , vcpu - > arch . shared - > sprg0 ) ;
break ;
2008-04-16 23:28:09 -05:00
case SPRN_SPRG1 :
2010-07-29 14:47:47 +02:00
kvmppc_set_gpr ( vcpu , rt , vcpu - > arch . shared - > sprg1 ) ;
break ;
2008-04-16 23:28:09 -05:00
case SPRN_SPRG2 :
2010-07-29 14:47:47 +02:00
kvmppc_set_gpr ( vcpu , rt , vcpu - > arch . shared - > sprg2 ) ;
break ;
2008-04-16 23:28:09 -05:00
case SPRN_SPRG3 :
2010-07-29 14:47:47 +02:00
kvmppc_set_gpr ( vcpu , rt , vcpu - > arch . shared - > sprg3 ) ;
break ;
2008-04-16 23:28:09 -05:00
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap . */
2009-10-30 05:47:15 +00:00
case SPRN_DEC :
{
2011-04-27 17:24:21 -05:00
kvmppc_set_gpr ( vcpu , rt ,
kvmppc_get_dec ( vcpu , get_tb ( ) ) ) ;
2009-10-30 05:47:15 +00:00
break ;
}
2008-04-16 23:28:09 -05:00
default :
2008-11-05 09:36:16 -06:00
emulated = kvmppc_core_emulate_mfspr ( vcpu , sprn , rt ) ;
if ( emulated = = EMULATE_FAIL ) {
printk ( " mfspr: unknown spr %x \n " , sprn ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , rt , 0 ) ;
2008-11-05 09:36:16 -06:00
}
2008-04-16 23:28:09 -05:00
break ;
}
2011-03-28 15:01:24 -05:00
kvmppc_set_exit_type ( vcpu , EMULATED_MFSPR_EXITS ) ;
2008-04-16 23:28:09 -05:00
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_STHX :
2008-04-16 23:28:09 -05:00
rs = get_rs ( inst ) ;
ra = get_ra ( inst ) ;
rb = get_rb ( inst ) ;
emulated = kvmppc_handle_store ( run , vcpu ,
2010-01-08 02:58:01 +01:00
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
2 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_STHUX :
2008-04-16 23:28:09 -05:00
rs = get_rs ( inst ) ;
ra = get_ra ( inst ) ;
rb = get_rb ( inst ) ;
2010-01-08 02:58:01 +01:00
ea = kvmppc_get_gpr ( vcpu , rb ) ;
2008-04-16 23:28:09 -05:00
if ( ra )
2010-01-08 02:58:01 +01:00
ea + = kvmppc_get_gpr ( vcpu , ra ) ;
2008-04-16 23:28:09 -05:00
emulated = kvmppc_handle_store ( run , vcpu ,
2010-01-08 02:58:01 +01:00
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
2 , 1 ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , ra , ea ) ;
2008-04-16 23:28:09 -05:00
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_MTSPR :
2008-04-16 23:28:09 -05:00
sprn = get_sprn ( inst ) ;
rs = get_rs ( inst ) ;
switch ( sprn ) {
case SPRN_SRR0 :
2010-07-29 14:47:46 +02:00
vcpu - > arch . shared - > srr0 = kvmppc_get_gpr ( vcpu , rs ) ;
break ;
2008-04-16 23:28:09 -05:00
case SPRN_SRR1 :
2010-07-29 14:47:46 +02:00
vcpu - > arch . shared - > srr1 = kvmppc_get_gpr ( vcpu , rs ) ;
break ;
2008-04-16 23:28:09 -05:00
/* XXX We need to context-switch the timebase for
* watchdog and FIT . */
case SPRN_TBWL : break ;
case SPRN_TBWU : break ;
2009-10-30 05:47:16 +00:00
case SPRN_MSSSR0 : break ;
2008-04-16 23:28:09 -05:00
case SPRN_DEC :
2010-01-08 02:58:01 +01:00
vcpu - > arch . dec = kvmppc_get_gpr ( vcpu , rs ) ;
2008-04-16 23:28:09 -05:00
kvmppc_emulate_dec ( vcpu ) ;
break ;
case SPRN_SPRG0 :
2010-07-29 14:47:47 +02:00
vcpu - > arch . shared - > sprg0 = kvmppc_get_gpr ( vcpu , rs ) ;
break ;
2008-04-16 23:28:09 -05:00
case SPRN_SPRG1 :
2010-07-29 14:47:47 +02:00
vcpu - > arch . shared - > sprg1 = kvmppc_get_gpr ( vcpu , rs ) ;
break ;
2008-04-16 23:28:09 -05:00
case SPRN_SPRG2 :
2010-07-29 14:47:47 +02:00
vcpu - > arch . shared - > sprg2 = kvmppc_get_gpr ( vcpu , rs ) ;
break ;
2008-04-16 23:28:09 -05:00
case SPRN_SPRG3 :
2010-07-29 14:47:47 +02:00
vcpu - > arch . shared - > sprg3 = kvmppc_get_gpr ( vcpu , rs ) ;
break ;
2008-04-16 23:28:09 -05:00
default :
2008-11-05 09:36:16 -06:00
emulated = kvmppc_core_emulate_mtspr ( vcpu , sprn , rs ) ;
if ( emulated = = EMULATE_FAIL )
printk ( " mtspr: unknown spr %x \n " , sprn ) ;
2008-04-16 23:28:09 -05:00
break ;
}
2011-03-28 15:01:24 -05:00
kvmppc_set_exit_type ( vcpu , EMULATED_MTSPR_EXITS ) ;
2008-04-16 23:28:09 -05:00
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_DCBI :
2008-04-16 23:28:09 -05:00
/* Do nothing. The guest is performing dcbi because
* hardware DMA is not snooped by the dcache , but
* emulated DMA either goes through the dcache as
* normal writes , or the host kernel has handled dcache
* coherence . */
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_LWBRX :
2008-04-16 23:28:09 -05:00
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 4 , 0 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_TLBSYNC :
2008-04-16 23:28:09 -05:00
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_STWBRX :
2008-04-16 23:28:09 -05:00
rs = get_rs ( inst ) ;
ra = get_ra ( inst ) ;
rb = get_rb ( inst ) ;
emulated = kvmppc_handle_store ( run , vcpu ,
2010-01-08 02:58:01 +01:00
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
4 , 0 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_LHBRX :
2008-04-16 23:28:09 -05:00
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 2 , 0 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_31_XOP_STHBRX :
2008-04-16 23:28:09 -05:00
rs = get_rs ( inst ) ;
ra = get_ra ( inst ) ;
rb = get_rb ( inst ) ;
emulated = kvmppc_handle_store ( run , vcpu ,
2010-01-08 02:58:01 +01:00
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
2 , 0 ) ;
break ;
default :
2008-11-05 09:36:16 -06:00
/* Attempt core-specific emulation below. */
2008-04-16 23:28:09 -05:00
emulated = EMULATE_FAIL ;
}
break ;
2009-01-03 16:23:05 -06:00
case OP_LWZ :
2008-04-16 23:28:09 -05:00
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 4 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_LWZU :
2008-04-16 23:28:09 -05:00
ra = get_ra ( inst ) ;
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 4 , 1 ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , ra , vcpu - > arch . paddr_accessed ) ;
2008-04-16 23:28:09 -05:00
break ;
2009-01-03 16:23:05 -06:00
case OP_LBZ :
2008-04-16 23:28:09 -05:00
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 1 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_LBZU :
2008-04-16 23:28:09 -05:00
ra = get_ra ( inst ) ;
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 1 , 1 ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , ra , vcpu - > arch . paddr_accessed ) ;
2008-04-16 23:28:09 -05:00
break ;
2009-01-03 16:23:05 -06:00
case OP_STW :
2008-04-16 23:28:09 -05:00
rs = get_rs ( inst ) ;
2010-01-08 02:58:01 +01:00
emulated = kvmppc_handle_store ( run , vcpu ,
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
4 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_STWU :
2008-04-16 23:28:09 -05:00
ra = get_ra ( inst ) ;
rs = get_rs ( inst ) ;
2010-01-08 02:58:01 +01:00
emulated = kvmppc_handle_store ( run , vcpu ,
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
4 , 1 ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , ra , vcpu - > arch . paddr_accessed ) ;
2008-04-16 23:28:09 -05:00
break ;
2009-01-03 16:23:05 -06:00
case OP_STB :
2008-04-16 23:28:09 -05:00
rs = get_rs ( inst ) ;
2010-01-08 02:58:01 +01:00
emulated = kvmppc_handle_store ( run , vcpu ,
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
1 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_STBU :
2008-04-16 23:28:09 -05:00
ra = get_ra ( inst ) ;
rs = get_rs ( inst ) ;
2010-01-08 02:58:01 +01:00
emulated = kvmppc_handle_store ( run , vcpu ,
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
1 , 1 ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , ra , vcpu - > arch . paddr_accessed ) ;
2008-04-16 23:28:09 -05:00
break ;
2009-01-03 16:23:05 -06:00
case OP_LHZ :
2008-04-16 23:28:09 -05:00
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 2 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_LHZU :
2008-04-16 23:28:09 -05:00
ra = get_ra ( inst ) ;
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_load ( run , vcpu , rt , 2 , 1 ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , ra , vcpu - > arch . paddr_accessed ) ;
2008-04-16 23:28:09 -05:00
break ;
2010-02-19 11:00:30 +01:00
case OP_LHA :
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_loads ( run , vcpu , rt , 2 , 1 ) ;
break ;
case OP_LHAU :
ra = get_ra ( inst ) ;
rt = get_rt ( inst ) ;
emulated = kvmppc_handle_loads ( run , vcpu , rt , 2 , 1 ) ;
kvmppc_set_gpr ( vcpu , ra , vcpu - > arch . paddr_accessed ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_STH :
2008-04-16 23:28:09 -05:00
rs = get_rs ( inst ) ;
2010-01-08 02:58:01 +01:00
emulated = kvmppc_handle_store ( run , vcpu ,
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
2 , 1 ) ;
break ;
2009-01-03 16:23:05 -06:00
case OP_STHU :
2008-04-16 23:28:09 -05:00
ra = get_ra ( inst ) ;
rs = get_rs ( inst ) ;
2010-01-08 02:58:01 +01:00
emulated = kvmppc_handle_store ( run , vcpu ,
kvmppc_get_gpr ( vcpu , rs ) ,
2008-04-16 23:28:09 -05:00
2 , 1 ) ;
2010-01-08 02:58:01 +01:00
kvmppc_set_gpr ( vcpu , ra , vcpu - > arch . paddr_accessed ) ;
2008-04-16 23:28:09 -05:00
break ;
default :
emulated = EMULATE_FAIL ;
2008-11-05 09:36:16 -06:00
}
if ( emulated = = EMULATE_FAIL ) {
emulated = kvmppc_core_emulate_op ( run , vcpu , inst , & advance ) ;
2010-02-19 11:00:31 +01:00
if ( emulated = = EMULATE_AGAIN ) {
advance = 0 ;
} else if ( emulated = = EMULATE_FAIL ) {
2008-11-05 09:36:16 -06:00
advance = 0 ;
printk ( KERN_ERR " Couldn't emulate instruction 0x%08x "
" (op %d xop %d) \n " , inst , get_op ( inst ) , get_xop ( inst ) ) ;
2010-01-10 03:27:32 +01:00
kvmppc_core_queue_program ( vcpu , 0 ) ;
2008-11-05 09:36:16 -06:00
}
2008-04-16 23:28:09 -05:00
}
2010-04-16 00:11:40 +02:00
trace_kvm_ppc_instr ( inst , kvmppc_get_pc ( vcpu ) , emulated ) ;
2008-07-14 14:00:04 +02:00
2010-04-16 00:11:40 +02:00
/* Advance past emulated instruction. */
2008-04-16 23:28:09 -05:00
if ( advance )
2010-04-16 00:11:40 +02:00
kvmppc_set_pc ( vcpu , kvmppc_get_pc ( vcpu ) + 4 ) ;
2008-04-16 23:28:09 -05:00
return emulated ;
}