/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
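
/* Exit statistics exported through the generic KVM debugfs code; each
 * entry maps a file name to a counter in the vcpu statistics structure. */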
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "dcr",         VCPU_STAT(dcr_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "itlb_r",      VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",      VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",      VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",      VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "isi",         VCPU_STAT(isi_exits) },
	{ "dsi",         VCPU_STAT(dsi_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gpr[i],
		       vcpu->arch.gpr[i+1],
		       vcpu->arch.gpr[i+2],
		       vcpu->arch.gpr[i+3]);
	}
}
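
/* Mark an exception of the given priority as pending; it is actually
 * delivered later by kvmppc_core_deliver_interrupts(). */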
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}
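
/* Queue specific BookE exception types for later delivery. */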
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask;

	switch (priority) {
	case BOOKE_IRQPRIO_PROGRAM:
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.msr & MSR_EE;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}
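
	/* To deliver, mimic hardware exception entry: save PC/MSR into the
	 * guest's SRR0/SRR1, vector to IVPR|IVOR[priority], and mask the
	 * guest MSR down to the bits this exception class leaves enabled. */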
	if (allowed) {
		vcpu->arch.srr0 = vcpu->arch.pc;
		vcpu->arch.srr1 = vcpu->arch.msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority <= BOOKE_MAX_INTERRUPT) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		vcpu->arch.dear = vcpu->arch.fault_dear;
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
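
	/* A guest data TLB miss has three outcomes: reflect the miss back to
	 * the guest, map the page into the shadow TLB if it is visible RAM,
	 * or hand the access to userspace as MMIO. */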
	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
			vcpu->arch.dear = vcpu->arch.fault_dear;
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = 0;
	vcpu->arch.msr = 0;
	vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching number so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR. */
	vcpu->arch.ivpr = 0x55550000;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
}
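
/* Copy guest-visible register state out for the KVM_GET_REGS ioctl. */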
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = vcpu->arch.cr;
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = vcpu->arch.xer;
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = vcpu->arch.gpr[i];

	return 0;
}
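
/* Load guest register state from the KVM_SET_REGS ioctl; the MSR is set
 * through kvmppc_set_msr() so that its side effects are applied. */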
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	vcpu->arch.cr = regs->cr;
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	vcpu->arch.xer = regs->xer;
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
		vcpu->arch.gpr[i] = regs->gpr[i];

	return 0;
}
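
/* Special register and FPU state are not supported on BookE yet. */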
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}
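
/* Module-level setup: allocate the guest exception handler area and copy
 * our handlers in to match the host's IVOR layout. */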
int kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. The low
	 * 16 bits of IVPR are ignored, so we need a 64KB-aligned (64KB)
	 * allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}
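
/* Module teardown: release the handler pages and unregister from KVM. */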
void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}