/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};
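
/*
 * spr_allowed() gates SPR access by privilege class: PAPR guests never
 * see hypervisor-level SPRs, and problem state (MSR[PR]=1) is limited
 * to the user-accessible set.
 */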
static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}
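
/*
 * Top-level instruction emulation for PR KVM: decode the trapped
 * instruction and either emulate it here, fail back to the generic
 * emulator, or exit to userspace (for PAPR hypercalls).
 */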
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;
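
	/*
	 * 0x44000002 is the big-endian encoding of "sc"; case 0 below
	 * matches its byte-swapped form, as emitted by early broken
	 * little-endian guests.
	 */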
	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up in
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
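	/* rfid/rfi: return from interrupt, reloading PC and MSR from SRR0/SRR1. */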
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI:
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu));
			*advance = 0;
			break;
		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				/* mtmsrd with L=1 only transfers MSR[RI] and MSR[EE] */
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);

			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
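		/*
		 * eieio needs no emulation here; the SLB operations below are
		 * delegated to the per-model MMU callbacks and fail on models
		 * without an SLB.
		 */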
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
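		/*
		 * dcbz is patched to this reserved opcode (see OP_31_XOP_DCBZ
		 * above) so that 32-byte dcbz traps here: zero the block by
		 * hand and synthesize a data storage interrupt if the store
		 * faults.
		 */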
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}
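
/*
 * Update one half of a shadow BAT from the 32-bit value the guest wrote:
 * the upper word carries BEPI/BL/Vs/Vp, the lower word BRPN/WIMG/PP.
 */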
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}
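
/* Map an (I/D)BAT SPR number onto the vcpu's shadow BAT array entry. */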
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}
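
/*
 * Emulate mtspr: writes are stored into the shadow vcpu state, forwarded
 * to a handler, or silently ignored; unknown SPRs fail emulation unless
 * DEBUG_SPR is set.
 */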
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
	case SPRN_FSCR:
		vcpu->arch.fscr = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
		vcpu->arch.tfhar = spr_val;
		break;
	case SPRN_TEXASR:
		vcpu->arch.texasr = spr_val;
		break;
	case SPRN_TFIAR:
		vcpu->arch.tfiar = spr_val;
		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
#endif
		break;
unprivileged:
	default:
		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}
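
/*
 * Emulate mfspr: the read-side mirror of the mtspr handler above; SPRs
 * that are purely host-managed simply read back as 0.
 */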
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = vcpu->arch.vtb;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
		*spr_val = vcpu->arch.tfhar;
		break;
	case SPRN_TEXASR:
		*spr_val = vcpu->arch.texasr;
		break;
	case SPRN_TFIAR:
		*spr_val = vcpu->arch.tfiar;
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}
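
/*
 * Alignment interrupt helpers: reconstruct the DSISR and DAR values for
 * a misaligned access from the faulting instruction itself.
 */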
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so can we
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}