2011-11-23 16:30:32 +02:00
# ifndef ARCH_X86_KVM_CPUID_H
# define ARCH_X86_KVM_CPUID_H
# include "x86.h"
2015-11-23 11:12:22 +01:00
# include <asm/cpu.h>
2017-08-05 00:12:49 +02:00
# include <asm/processor.h>
2011-11-23 16:30:32 +02:00
2014-09-16 15:10:03 +03:00
int kvm_update_cpuid ( struct kvm_vcpu * vcpu ) ;
2016-03-08 09:52:13 +01:00
bool kvm_mpx_supported ( void ) ;
2011-11-23 16:30:32 +02:00
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry ( struct kvm_vcpu * vcpu ,
u32 function , u32 index ) ;
2013-09-22 16:44:50 +02:00
int kvm_dev_ioctl_get_cpuid ( struct kvm_cpuid2 * cpuid ,
struct kvm_cpuid_entry2 __user * entries ,
unsigned int type ) ;
2011-11-23 16:30:32 +02:00
int kvm_vcpu_ioctl_set_cpuid ( struct kvm_vcpu * vcpu ,
struct kvm_cpuid * cpuid ,
struct kvm_cpuid_entry __user * entries ) ;
int kvm_vcpu_ioctl_set_cpuid2 ( struct kvm_vcpu * vcpu ,
struct kvm_cpuid2 * cpuid ,
struct kvm_cpuid_entry2 __user * entries ) ;
int kvm_vcpu_ioctl_get_cpuid2 ( struct kvm_vcpu * vcpu ,
struct kvm_cpuid2 * cpuid ,
struct kvm_cpuid_entry2 __user * entries ) ;
2012-06-07 14:07:48 +03:00
void kvm_cpuid ( struct kvm_vcpu * vcpu , u32 * eax , u32 * ebx , u32 * ecx , u32 * edx ) ;
2011-11-23 16:30:32 +02:00
2015-03-29 23:56:12 +03:00
int cpuid_query_maxphyaddr ( struct kvm_vcpu * vcpu ) ;
/*
 * Return the vcpu's cached guest physical-address width (MAXPHYADDR).
 * Reads the precomputed value from vcpu->arch; for a fresh recomputation
 * use cpuid_query_maxphyaddr() instead.
 */
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}
2011-11-23 16:30:32 +02:00
2017-08-05 00:12:49 +02:00
/*
 * Location of one kernel feature word within the CPUID space: the leaf
 * (function/index pair) to query, and which output register of that leaf
 * (CPUID_EAX..CPUID_EDX) carries the feature bits.
 */
struct cpuid_reg {
	u32 function;	/* CPUID leaf (value placed in EAX) */
	u32 index;	/* CPUID subleaf (value placed in ECX) */
	int reg;	/* which output register: CPUID_EAX/EBX/ECX/EDX */
};
2011-11-23 16:30:32 +02:00
2017-08-05 00:12:49 +02:00
/*
 * Map each kernel feature word index (the CPUID_* enums used by
 * cpu_has()/X86_FEATURE_*) back to the CPUID leaf and register that
 * defines it, so a feature bit number can be translated into a guest
 * CPUID lookup.  Entries with function == 0 are unmapped and rejected
 * by x86_feature_cpuid() at compile time.
 */
static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	/*
	 * BUG FIX: this entry previously pointed at leaf 0xc0000001
	 * (Centaur's leaf, copy-pasted from the line above); the ECX
	 * features of the extended leaf live in leaf 0x80000001.
	 */
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
	[CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
};
2011-11-23 16:30:32 +02:00
2017-08-05 00:12:49 +02:00
/*
 * Translate a kernel X86_FEATURE_* bit number into the CPUID leaf and
 * register that hold it, via the reverse_cpuid[] table.
 *
 * Must be called with a compile-time constant feature (guaranteed by
 * __always_inline plus the __builtin_constant_p check) so the
 * BUILD_BUG_ON()s can reject out-of-range or unmapped feature words
 * at build time rather than at runtime.
 */
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
{
	unsigned x86_leaf = x86_feature / 32;	/* feature word index (32 bits per word) */

	/* All three checks are compile-time: constant, in range, and mapped. */
	BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);

	return reverse_cpuid[x86_leaf];
}
2017-08-05 00:12:49 +02:00
static __always_inline int * guest_cpuid_get_register ( struct kvm_vcpu * vcpu , unsigned x86_feature )
2014-04-01 17:46:34 +08:00
{
2017-08-05 00:12:49 +02:00
struct kvm_cpuid_entry2 * entry ;
const struct cpuid_reg cpuid = x86_feature_cpuid ( x86_feature ) ;
2015-05-05 11:50:23 +02:00
2017-08-05 00:12:49 +02:00
entry = kvm_find_cpuid_entry ( vcpu , cpuid . function , cpuid . index ) ;
if ( ! entry )
return NULL ;
2012-01-09 14:00:35 -05:00
2017-08-05 00:12:49 +02:00
switch ( cpuid . reg ) {
case CPUID_EAX :
return & entry - > eax ;
case CPUID_EBX :
return & entry - > ebx ;
case CPUID_ECX :
return & entry - > ecx ;
case CPUID_EDX :
return & entry - > edx ;
default :
BUILD_BUG ( ) ;
return NULL ;
}
2012-01-09 14:00:35 -05:00
}
2017-08-05 00:12:49 +02:00
/*
 * Check whether the guest's CPUID advertises @x86_feature.
 */
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
	int *word;

	/* Never report XSAVE to the guest when the host itself lacks it. */
	if (x86_feature == X86_FEATURE_XSAVE &&
	    !static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	word = guest_cpuid_get_register(vcpu, x86_feature);

	/* Absent CPUID entry means the feature is not exposed. */
	return word && (*word & bit(x86_feature));
}
2014-09-02 13:24:12 +02:00
static inline bool guest_cpuid_is_amd ( struct kvm_vcpu * vcpu )
{
struct kvm_cpuid_entry2 * best ;
best = kvm_find_cpuid_entry ( vcpu , 0 , 0 ) ;
return best & & best - > ebx = = X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx ;
}
2015-11-23 11:12:22 +01:00
static inline int guest_cpuid_family ( struct kvm_vcpu * vcpu )
{
struct kvm_cpuid_entry2 * best ;
best = kvm_find_cpuid_entry ( vcpu , 0x1 , 0 ) ;
if ( ! best )
return - 1 ;
return x86_family ( best - > eax ) ;
}
static inline int guest_cpuid_model ( struct kvm_vcpu * vcpu )
{
struct kvm_cpuid_entry2 * best ;
best = kvm_find_cpuid_entry ( vcpu , 0x1 , 0 ) ;
if ( ! best )
return - 1 ;
return x86_model ( best - > eax ) ;
}
static inline int guest_cpuid_stepping ( struct kvm_vcpu * vcpu )
{
struct kvm_cpuid_entry2 * best ;
best = kvm_find_cpuid_entry ( vcpu , 0x1 , 0 ) ;
if ( ! best )
return - 1 ;
return x86_stepping ( best - > eax ) ;
}
2017-03-20 01:16:28 -07:00
static inline bool supports_cpuid_fault ( struct kvm_vcpu * vcpu )
{
return vcpu - > arch . msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT ;
}
static inline bool cpuid_fault_enabled ( struct kvm_vcpu * vcpu )
{
return vcpu - > arch . msr_misc_features_enables &
MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ;
}
2011-11-23 16:30:32 +02:00
# endif