2008-06-27 21:58:02 +04:00
# ifndef ASM_KVM_CACHE_REGS_H
# define ASM_KVM_CACHE_REGS_H
2010-01-21 16:31:51 +03:00
/*
 * Superset of CR0/CR4 bits that may be left under guest control (i.e. read
 * directly from the architectural register cache without a decache call).
 * The actual guest-owned set is the intersection with
 * vcpu->arch.cr{0,4}_guest_owned_bits — see kvm_read_cr0_bits() and
 * kvm_read_cr4_bits() below.
 */
# define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
# define KVM_POSSIBLE_CR4_GUEST_BITS \
( X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_PGE )
2008-06-27 21:58:02 +04:00
static inline unsigned long kvm_register_read ( struct kvm_vcpu * vcpu ,
enum kvm_reg reg )
{
if ( ! test_bit ( reg , ( unsigned long * ) & vcpu - > arch . regs_avail ) )
kvm_x86_ops - > cache_reg ( vcpu , reg ) ;
return vcpu - > arch . regs [ reg ] ;
}
static inline void kvm_register_write ( struct kvm_vcpu * vcpu ,
enum kvm_reg reg ,
unsigned long val )
{
vcpu - > arch . regs [ reg ] = val ;
__set_bit ( reg , ( unsigned long * ) & vcpu - > arch . regs_dirty ) ;
__set_bit ( reg , ( unsigned long * ) & vcpu - > arch . regs_avail ) ;
}
/* Convenience accessor: read the guest RIP through the register cache. */
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}
/* Convenience accessor: write the guest RIP through the register cache. */
static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
2009-05-31 23:58:47 +04:00
static inline u64 kvm_pdptr_read ( struct kvm_vcpu * vcpu , int index )
{
2010-05-04 14:00:55 +04:00
might_sleep ( ) ; /* on svm */
2009-05-31 23:58:47 +04:00
if ( ! test_bit ( VCPU_EXREG_PDPTR ,
( unsigned long * ) & vcpu - > arch . regs_avail ) )
kvm_x86_ops - > cache_reg ( vcpu , VCPU_EXREG_PDPTR ) ;
return vcpu - > arch . pdptrs [ index ] ;
}
2009-12-29 19:07:30 +03:00
/*
 * Read the requested CR0 bits. Bits currently owned by the guest may be
 * stale in the software copy, so decache them from hardware first when the
 * caller's mask touches any guest-owned bit.
 */
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong guest_bits = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	if (guest_bits & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);

	return vcpu->arch.cr0 & mask;
}
/* Read the full guest CR0 (all bits, decaching guest-owned ones). */
static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}
2009-12-07 13:16:48 +03:00
/*
 * Read the requested CR4 bits, decaching guest-owned bits from hardware
 * first when the caller's mask touches any of them.
 */
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong guest_bits = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

	if (guest_bits & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	return vcpu->arch.cr4 & mask;
}
/* Read the full guest CR4 (all bits, decaching guest-owned ones). */
static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
2008-06-27 21:58:02 +04:00
# endif