/* 2008-06-27 14:58:02 -03:00 */
# ifndef ASM_KVM_CACHE_REGS_H
# define ASM_KVM_CACHE_REGS_H
static inline unsigned long kvm_register_read ( struct kvm_vcpu * vcpu ,
enum kvm_reg reg )
{
if ( ! test_bit ( reg , ( unsigned long * ) & vcpu - > arch . regs_avail ) )
kvm_x86_ops - > cache_reg ( vcpu , reg ) ;
return vcpu - > arch . regs [ reg ] ;
}
static inline void kvm_register_write ( struct kvm_vcpu * vcpu ,
enum kvm_reg reg ,
unsigned long val )
{
vcpu - > arch . regs [ reg ] = val ;
__set_bit ( reg , ( unsigned long * ) & vcpu - > arch . regs_dirty ) ;
__set_bit ( reg , ( unsigned long * ) & vcpu - > arch . regs_avail ) ;
}
/* Convenience accessor: read RIP through the generic register cache. */
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	unsigned long rip = kvm_register_read(vcpu, VCPU_REGS_RIP);

	return rip;
}
static inline void kvm_rip_write ( struct kvm_vcpu * vcpu , unsigned long val )
{
kvm_register_write ( vcpu , VCPU_REGS_RIP , val ) ;
}
/* 2009-05-31 22:58:47 +03:00 */
static inline u64 kvm_pdptr_read ( struct kvm_vcpu * vcpu , int index )
{
if ( ! test_bit ( VCPU_EXREG_PDPTR ,
( unsigned long * ) & vcpu - > arch . regs_avail ) )
kvm_x86_ops - > cache_reg ( vcpu , VCPU_EXREG_PDPTR ) ;
return vcpu - > arch . pdptrs [ index ] ;
}
/* 2009-12-29 18:07:30 +02:00 */
/*
 * Read CR0, masked by @mask.
 *
 * Mirrors kvm_read_cr4_bits() below: bits the guest is allowed to own may
 * be stale in vcpu->arch.cr0, so when the caller asks for any guest-owned
 * bit, force the backend to decache (refresh) them before the masked read.
 * The original returned the cached value unconditionally, which could hand
 * back stale guest-owned bits.
 *
 * NOTE(review): assumes arch.cr0_guest_owned_bits and
 * kvm_x86_ops->decache_cr0_guest_bits exist in this tree, mirroring the
 * CR4 pair used below — confirm against the struct definitions.
 */
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	if (mask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}
static inline ulong kvm_read_cr0 ( struct kvm_vcpu * vcpu )
{
return kvm_read_cr0_bits ( vcpu , ~ 0UL ) ;
}
/* 2009-12-07 12:16:48 +02:00 */
/*
 * Read CR4, masked by @mask.  Guest-owned CR4 bits may be stale in the
 * cache, so refresh them from the backend whenever the caller requests
 * any of them.
 */
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong owned = vcpu->arch.cr4_guest_owned_bits;

	if (owned & mask)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	return mask & vcpu->arch.cr4;
}
static inline ulong kvm_read_cr4 ( struct kvm_vcpu * vcpu )
{
return kvm_read_cr4_bits ( vcpu , ~ 0UL ) ;
}
/* 2008-06-27 14:58:02 -03:00 */
# endif