# ifndef _ASM_X86_PVCLOCK_H
# define _ASM_X86_PVCLOCK_H
# include <linux/clocksource.h>
# include <asm/pvclock-abi.h>
#ifdef CONFIG_KVM_GUEST
/*
 * Return the vCPU0 pvclock time-info mapping.  Defined elsewhere
 * (presumably by the KVM guest clock code -- confirm against kvmclock.c).
 */
extern struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void);
#else
/* Without KVM guest support there is no pvti page to hand out. */
static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
{
	return NULL;
}
#endif
/* some helper functions for xen and kvm pv clock sources */

/* Read the current time (in clocksource units) from @src. */
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);

/* Return the flags byte of @src (combined with any globally set flags --
 * see pvclock_set_flags(); verify against the definition). */
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);

/* Set flags that apply to all pvclock reads. */
void pvclock_set_flags(u8 flags);

/* TSC frequency in kHz as advertised by @src. */
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);

/* Read the wall-clock time published in @wall, adjusted via @vcpu,
 * into @ts. */
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
			    struct pvclock_vcpu_time_info *vcpu,
			    struct timespec *ts);

/* Resume hook (e.g. after suspend; confirm against the definition). */
void pvclock_resume(void);

/* Tell clock watchdogs not to mark the clocksource unstable. */
void pvclock_touch_watchdogs(void);
/*
 * Begin a lockless (seqcount-style) read of *src.  Returns the version
 * with the low bit masked off (the low bit is the "update in progress"
 * marker by seqcount convention -- hence the & ~1).  Pass the returned
 * value to pvclock_read_retry() after reading the data.
 */
static __always_inline
unsigned pvclock_read_begin(const struct pvclock_vcpu_time_info *src)
{
	unsigned version = src->version & ~1;

	/* Make sure that the version is read before the data. */
	virt_rmb();
	return version;
}
/*
 * Finish a lockless read of *src: returns true if the version changed
 * since pvclock_read_begin() returned @version (i.e. an update raced
 * with the read), in which case the caller must discard what it read
 * and retry the whole sequence.
 */
static __always_inline
bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src,
			unsigned version)
{
	/* Make sure that the version is re-read after the data. */
	virt_rmb();
	return unlikely(version != src->version);
}
/*
 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
 * yielding a 64-bit result: computes ((delta << shift) * mul_frac) >> 32
 * (with a right shift of delta when @shift is negative).
 */
static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
{
	u64 product;
#ifdef __i386__
	u32 tmp1, tmp2;
#else
	ulong tmp;
#endif

	/* Apply the binary shift up front; only the multiply remains. */
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#ifdef __i386__
	/*
	 * 32-bit: build the 64x32->96 bit product from two 32x32->64
	 * multiplies (low and high halves of delta), keeping only bits
	 * 32..95 -- i.e. (delta * mul_frac) >> 32 -- in edx:eax ("=A").
	 */
	__asm__ (
		"mul  %5       ; "
		"mov  %4,%%eax ; "
		"mov  %%edx,%4 ; "
		"mul  %5       ; "
		"xor  %5,%5    ; "
		"add  %4,%%eax ; "
		"adc  %5,%%edx ; "
		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac));
#elif defined(__x86_64__)
	/*
	 * 64-bit: one full 64x64->128 multiply (rdx:rax), then shrd
	 * shifts bits 32..95 of the product down into rax.
	 */
	__asm__ (
		"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
		: [lo]"=a"(product),
		  [hi]"=d"(tmp)
		: "0"(delta),
		  [mul_frac]"rm"((u64)mul_frac));
#else
#error implement me!
#endif

	return product;
}
static __always_inline
2016-12-21 20:32:01 +01:00
u64 __pvclock_read_cycles ( const struct pvclock_vcpu_time_info * src , u64 tsc )
2012-11-27 23:28:51 -02:00
{
2016-09-01 14:21:03 +02:00
u64 delta = tsc - src - > tsc_timestamp ;
2016-12-21 20:32:01 +01:00
u64 offset = pvclock_scale_delta ( delta , src - > tsc_to_system_mul ,
2016-06-09 13:06:08 +02:00
src - > tsc_shift ) ;
return src - > system_time + offset ;
2012-11-27 23:28:51 -02:00
}
/*
 * Per-vCPU time info, padded to the SMP cache-line size so that each
 * entry in an array starts on its own cache line (presumably to avoid
 * false sharing between vCPUs -- confirm against the users).
 */
struct pvclock_vsyscall_time_info {
	struct pvclock_vcpu_time_info pvti;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

/* Size of one aligned entry, including the alignment padding. */
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
# endif /* _ASM_X86_PVCLOCK_H */