#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}
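
/*
 * Illustrative sketch, not part of the original header: a caller can use
 * cpuid_maxphyaddr() to reject guest physical addresses that set bits above
 * the address width the guest was given, along the lines of:
 *
 *	static inline bool gpa_fits_guest(struct kvm_vcpu *vcpu, gpa_t gpa)
 *	{
 *		return !(gpa >> cpuid_maxphyaddr(vcpu));
 *	}
 *
 * gpa_fits_guest() is a hypothetical helper name used only for illustration.
 */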

static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
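
/*
 * Illustrative sketch, not part of the original header: these predicates are
 * intended to gate guest-visible state on what the guest's CPUID advertises.
 * For example, a CR4 write could be refused when the guest was not given
 * XSAVE but tries to set CR4.OSXSAVE:
 *
 *	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
 *		return 1;	(reject the write)
 *
 * The surrounding context is assumed; only guest_cpuid_has_xsave() and
 * X86_CR4_OSXSAVE come from the kernel itself.
 */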

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->edx & bit(X86_FEATURE_MTRR));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ecx & bit(X86_FEATURE_PKU));
}

static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_LM));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}

/*
 * NRIPS is provided through CPUID Fn 0x8000000a, EDX bit 3.
 */
#define BIT_NRIPS	3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

	/*
	 * NRIPS is a scattered cpuid feature, so we can't use
	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
	 * position 8, not 3).
	 */
	return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}
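
/*
 * Illustrative sketch, not part of the original header: the family/model/
 * stepping helpers return -1 when CPUID leaf 1 is absent, so callers gating
 * model-specific behaviour should treat negative values as "unknown", e.g.:
 *
 *	if (guest_cpuid_is_amd(vcpu) && guest_cpuid_family(vcpu) == 0x15)
 *		apply_fam15h_quirk(vcpu);
 *
 * apply_fam15h_quirk() is a made-up name used purely for illustration.
 */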

#endif