#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <asm/processor.h>
#include <asm/mwait.h>
#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

/* #BP (INT3) and #OF (INTO) are raised by explicit instructions, hence "soft". */
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
	return false;
#endif
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
	return false;
#endif
}
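
/*
 * Worked example (illustrative, not part of the original header): with
 * 48-bit virtual addresses, get_canonical() sign-extends bit 47, so
 * get_canonical(0x0000800000000000, 48) == 0xffff800000000000 and
 * is_noncanonical_address() reports 0x0000800000000000 as non-canonical.
 */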

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}
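
/*
 * Usage sketch (illustrative only): callers invalidate one cached
 * translation with vcpu_clear_mmio_info(vcpu, gva), or drop the whole
 * cache with vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY).
 */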

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);

#define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}
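
/*
 * Illustrative example (not from the original source): virtual_tsc_mult and
 * virtual_tsc_shift encode the guest's nanoseconds-to-TSC-cycles ratio, so
 * for a guest whose virtual TSC runs at 2 GHz, nsec_to_cycles(vcpu, 1000000)
 * yields roughly 2000000 cycles.
 */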

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
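
/*
 * Usage sketch (illustrative, not part of the original header): the macro
 * turns n/base into a 32.32 fixed-point fraction, updating n in place and
 * evaluating to the remainder; the quotient must fit in 32 bits (n < base)
 * or divl raises #DE. For example:
 *
 *	u32 n = 1000, rem;
 *	rem = do_shl32_div32(n, 3000);
 *	// n is now (1000ULL << 32) / 3000 == 0x55555555, i.e. ~1/3 in 32.32
 */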

static inline bool kvm_mwait_in_guest(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT))
		return false;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/* All AMD CPUs have a working MWAIT implementation */
		return true;
	case X86_VENDOR_INTEL:
		/* Handle Intel below */
		break;
	default:
		return false;
	}

	/*
	 * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic as
	 * they would allow the guest to stop the CPU completely by disabling
	 * interrupts and then invoking MWAIT.
	 */
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return false;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return false;

	return true;
}

#endif