#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

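/*
 * CR0/CR4 bits that KVM may leave under direct guest control (no VM exit
 * when the guest touches them).  The bits actually handed to the guest are
 * tracked in vcpu->arch.cr0_guest_owned_bits / cr4_guest_owned_bits.
 */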
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)

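/*
 * Lazily cached general-purpose register access: a register is only pulled
 * in from the vendor module (kvm_x86_ops->cache_reg()) the first time it is
 * read after a VM exit, and a write marks it dirty so the vendor code can
 * sync it back before the next VM entry.
 */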
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

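/* RIP goes through the same availability/dirty tracking as the GPRs. */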
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */
	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

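/*
 * Bits of CR0 that may be guest-owned are decached from the vendor module
 * before the cached vcpu->arch.cr0 is masked and returned.
 */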
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

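/* Same as kvm_read_cr0_bits(), but for CR4. */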
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

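/*
 * CR3 is cached like the other registers: decache it from the vendor module
 * if it has not been marked available since the last VM exit.
 */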
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

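/* PKRU is not tracked in this register cache; it comes from a vendor callback. */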
static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->get_pkru(vcpu);
}

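/*
 * "Guest mode" here means the vCPU is running a nested guest (L2); it is
 * tracked with the HF_GUEST_MASK hflag.
 */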
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif