#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
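
/* Drop any exception still queued for injection into the guest. */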
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}
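
/*
 * Queue an interrupt for injection; "soft" marks interrupts raised by an
 * INTn instruction during emulation rather than by an external source.
 */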
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}
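
/* Drop any interrupt still queued for injection into the guest. */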
static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}
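
/*
 * An event already taken off its queue (exception, interrupt, or NMI) must
 * be reinjected before any new event can be delivered to the guest.
 */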
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}
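
/*
 * #BP (INT3) and #OF (INTO) are raised by instructions the guest executes
 * itself, so they are the "soft" exceptions.
 */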
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}
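
/* Protected mode is in effect whenever CR0.PE is set. */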
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}
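
/*
 * Long mode is active when EFER.LMA is set; on a 32-bit host it can never
 * be active.
 */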
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}
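
/*
 * True while the walker MMU is the nested MMU, i.e. while translations for
 * an L2 guest must pass through L1's page tables as well.
 */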
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
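
/* Paging-mode predicates, each testing a single control-register bit. */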
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
}
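
/*
 * Build a 32-bit mask for the given bit number; the "& 31" keeps the shift
 * well defined, so e.g. bit(5) and bit(37) both yield 0x20.
 */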
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
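
/*
 * Cache the gva->gfn translation and access rights of the last MMIO access,
 * so a repeated access to the same page can skip the guest page-table walk.
 */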
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
}

/*
 * Clear the mmio cache info for the given gva; in particular, if gva is
 * ~0ul, clear all mmio cache info.
 */
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}
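
/* The cached mmio info matches only on the exact page (by gva or gfn). */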
static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}
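
/* Non-inline helpers, implemented in x86.c. */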
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);

int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

extern u64 host_xcr0;

#endif