#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

/*
 * Architectural power-on value of IA32_CR_PAT: PA0 = WB, PA1 = WT,
 * PA2 = UC-, PA3 = UC, mirrored in PA4-PA7.
 */
#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}
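
/*
 * Record an interrupt for injection on the next VM entry.  @soft marks
 * software interrupts (INTn and friends, delivered via instruction
 * emulation), which need instruction length information so that the
 * guest's return RIP points past the trapping instruction.
 */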
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}
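
/*
 * True if an exception, interrupt or NMI from an earlier, interrupted
 * injection attempt is still pending and must be delivered before any
 * new event can be injected.
 */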
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}
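
/*
 * #BP and #OF are generated by instructions (INT3, INTO), so they are
 * "soft" exceptions and, like software interrupts, require instruction
 * length information when reinjected.
 */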
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}
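
/*
 * Long mode alone is not sufficient: compatibility-mode code also runs
 * with EFER.LMA set.  CS.L distinguishes true 64-bit mode from it.
 */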
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}
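
/*
 * True when walk_mmu has been switched to the nested MMU, i.e. the vcpu
 * is running an L2 guest and guest virtual addresses are translated
 * through the nested paging structures.
 */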
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}
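
/*
 * Masking the shift count with 31 keeps it in range; shifting a 32-bit
 * value by 32 or more would be undefined behavior.
 */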
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
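
/*
 * Cache the gva->gfn translation of an MMIO access so that a subsequent
 * access to the same page can skip the guest page-table walk.  The
 * memslot generation snapshot invalidates the cache whenever the
 * memslots change.
 */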
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva.  If gva is MMIO_GVA_ANY,
 * we clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}
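
/*
 * Illustrative sketch only, not part of this header: an emulation fast
 * path could consult the cache roughly like this before falling back to
 * a full page-table walk (emulator_check_access() and handle_mmio() are
 * hypothetical helpers):
 *
 *	if (vcpu_match_mmio_gva(vcpu, gva) &&
 *	    emulator_check_access(vcpu->arch.access, write))
 *		return handle_mmio(vcpu, gva);
 *	// slow path: walk the guest page tables
 */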

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	kvm_register_write(vcpu, reg, val);
}
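
/*
 * Outside 64-bit mode the effective register width is 32 bits, so reads
 * are truncated and writes are zero-extended.  A sketch, assuming a vcpu
 * currently in 32-bit protected mode:
 *
 *	kvm_register_writel(vcpu, VCPU_REGS_RAX, 0x123456789abcdef0UL);
 *	kvm_register_readl(vcpu, VCPU_REGS_RAX);  // yields 0x9abcdef0
 */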

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);

#define KVM_SUPPORTED_XCR0	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
				| XSTATE_BNDREGS | XSTATE_BNDCSR \
				| XSTATE_AVX512)
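
/*
 * XSTATE_FP must always be set in XCR0; the remaining bits enable SSE,
 * AVX (YMM), MPX (BNDREGS/BNDCSR) and the AVX-512 state components.
 */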

extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

/* Lower bound applied to the guest's emulated timer period. */
extern unsigned int min_timer_period_us;

/* How early to fire the lapic timer to hide interrupt-injection latency. */
extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

#endif