/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */
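
/*
 * Illustrative usage sketch (editorial addition, not part of the kernel
 * build): userspace normally reaches this code through libc, which
 * resolves clock_gettime() to the weak __vdso_clock_gettime alias
 * defined at the bottom of this file, so the call below is serviced
 * entirely in user context on the fast path:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 */
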
/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>

#define gtod (&VVAR(vsyscall_gtod_data))

notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of a type that depends on the CPU) before
	 * rdtsc is enough to ensure that rdtsc is ordered with respect to
	 * loads.  The various CPU manuals are unclear as to whether rdtsc
	 * can be reordered with later loads, but no one has ever seen it
	 * happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is very
	 * likely) and there's a data dependence, so force GCC to generate a
	 * branch instead.  I don't use barrier() because we don't actually
	 * need a barrier, and if this function ever gets inlined it will
	 * generate worse code.
	 */
	asm volatile ("");
	return last;
}

static notrace cycle_t vread_hpet(void)
{
	/* The HPET main counter register lives at offset 0xf0. */
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	/* Real clock_gettime() syscall: number in rax, args in rdi/rsi. */
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}

notrace static inline long vgetns(void)
{
	long v;
	cycles_t cycles;

	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
	else
		cycles = vread_hpet();
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return (v * gtod->clock.mult) >> gtod->clock.shift;
}
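
/*
 * Editorial note: the clocksource scaling above converts a raw cycle
 * delta to nanoseconds as ns = (delta * mult) >> shift.  As a worked
 * example (hypothetical calibration values, not taken from this code),
 * a 1 GHz counter could use shift = 24 and mult = 1 << 24, so a delta
 * of 1000 cycles gives (1000 << 24) >> 24 = 1000 ns.
 */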

notrace static noinline int do_realtime(struct timespec *ts)
{
	unsigned long seq, ns;
	do {
		seq = read_seqbegin(&gtod->lock);
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqretry(&gtod->lock, seq)));
	timespec_add_ns(ts, ns);
	return 0;
}
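
/*
 * Editorial note: read_seqbegin()/read_seqretry() form the lockless
 * reader side of the seqlock protecting vsyscall_gtod_data; if the
 * kernel updates the timekeeping data while a reader is sampling it,
 * the sequence count changes and the loop retries, so each reader
 * always works from an internally consistent snapshot.
 */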

notrace static noinline int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqbegin(&gtod->lock);
		secs = gtod->wall_time_sec;
		ns = gtod->wall_time_nsec + vgetns();
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	/*
	 * wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec are all
	 * guaranteed to be nonnegative, but their sum is not bounded below
	 * 2 * NSEC_PER_SEC (vgetns() itself can exceed a second), so
	 * normalize with a loop rather than a single subtraction.
	 */
	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return 0;
}

notrace static noinline int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqbegin(&gtod->lock);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));
	return 0;
}

notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqbegin(&gtod->lock);
		secs = gtod->wall_time_coarse.tv_sec;
		ns = gtod->wall_time_coarse.tv_nsec;
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	/*
	 * wall_time_coarse.tv_nsec and wall_to_monotonic.tv_nsec are both
	 * guaranteed to be between 0 and NSEC_PER_SEC, so their sum stays
	 * below 2 * NSEC_PER_SEC and a single subtraction is enough.
	 */
	if (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return 0;
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
			return do_realtime(ts);
		break;
	case CLOCK_MONOTONIC:
		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
			return do_monotonic(ts);
		break;
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(ts);
	}

	return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
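
/*
 * Editorial note: in __vdso_clock_gettime() above, if no vclock is
 * usable (vclock_mode == VCLOCK_NONE, e.g. the active clocksource has
 * no counter readable from userspace) or the clock id is not handled
 * by the switch, the request falls through to vdso_fallback_gettime()
 * and is serviced by the real syscall.
 */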

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret;
	if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
		if (likely(tv != NULL)) {
			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
				     offsetof(struct timespec, tv_nsec) ||
				     sizeof(*tv) != sizeof(struct timespec));
			/* Fill tv as a timespec, then convert ns to us in place. */
			do_realtime((struct timespec *)tv);
			tv->tv_usec /= 1000;
		}
		if (unlikely(tz != NULL)) {
			/* Avoid memcpy. Some old compilers fail to inline it. */
			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
			tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
		}

		return 0;
	}
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely.
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86_64 so we don't need any locks. */
	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));