/*
 *  linux/kernel/time.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various
 *  time related system calls: time, stime, gettimeofday, settimeofday,
 *  adjtime
 */

/*
 * Modification history kernel/time.c
 *
 * 1993-09-02    Philip Gladstone
 *      Created file with time related functions from sched.c and adjtimex()
 * 1993-10-08    Torsten Duwe
 *      adjtime interface update and CMOS clock write code
 * 1995-08-13    Torsten Duwe
 *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16    Ulrich Windl
 *      Introduced error checking for many cases in adjtimex().
 *      Updated NTP code according to technical memorandum Jan '96
 *      "A Kernel Model for Precision Timekeeping" by Dave Mills
 *      Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *      (Even though the technical memorandum forbids it)
 * 2004-07-14    Christoph Lameter
 *      Added getnstimeofday to allow the posix timer functions to return
 *      with nanosecond accuracy
 */
#include <linux/module.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
/*
 * The timezone where the local system is located.  Used as a default by some
 * programs who obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);
#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
asmlinkage long sys_time(time_t __user *tloc)
{
        /*
         * We read xtime.tv_sec atomically - it's updated
         * atomically by update_wall_time(), so no need to
         * even read-lock the xtime seqlock:
         */
        time_t i = xtime.tv_sec;

        smp_rmb(); /* sys_time() results are coherent */

        if (tloc) {
                if (put_user(i, tloc))
                        i = -EFAULT;
        }
        return i;
}
/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
asmlinkage long sys_stime(time_t __user *tptr)
{
        struct timespec tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime(&tv, NULL);
        if (err)
                return err;

        do_settimeofday(&tv);
        return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */
asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __user *tz)
{
        if (likely(tv != NULL)) {
                struct timeval ktv;
                do_gettimeofday(&ktv);
                if (copy_to_user(tv, &ktv, sizeof(ktv)))
                        return -EFAULT;
        }
        if (unlikely(tz != NULL)) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }
        return 0;
}
/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * This is ugly, but preferable to the alternatives.  Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be
 * hard to make the program warp the clock precisely n hours) or
 * compile in the timezone information into the kernel.  Bad, bad....
 *
 *                                              - TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it. This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
static inline void warp_clock(void)
{
        write_seqlock_irq(&xtime_lock);
        wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
        xtime.tv_sec += sys_tz.tz_minuteswest * 60;
        write_sequnlock_irq(&xtime_lock);
        clock_was_set();
}
/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time. Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand about
 * timezones. This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right. Otherwise,
 * various programs will get confused when the clock gets warped.
 */
int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
{
        static int firsttime = 1;
        int error = 0;

        if (tv && !timespec_valid(tv))
                return -EINVAL;

        error = security_settime(tv, tz);
        if (error)
                return error;

        if (tz) {
                /* SMP safe, global irq locking makes it work. */
                sys_tz = *tz;
                if (firsttime) {
                        firsttime = 0;
                        if (!tv)
                                warp_clock();
                }
        }
        if (tv)
        {
                /* SMP safe, again the code in arch/foo/time.c should
                 * globally block out interrupts when it runs.
                 */
                return do_settimeofday(tv);
        }
        return 0;
}
asmlinkage long sys_settimeofday(struct timeval __user *tv,
                                 struct timezone __user *tz)
{
        struct timeval user_tv;
        struct timespec new_ts;
        struct timezone new_tz;

        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
asmlinkage long sys_adjtimex(struct timex __user *txc_p)
{
        struct timex txc;               /* Local copy of parameter */
        int ret;

        /* Copy the user data space into the kernel copy
         * structure. But bear in mind that the structures
         * may change
         */
        if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
                return -EFAULT;
        ret = do_adjtimex(&txc);
        return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}
inline struct timespec current_kernel_time(void)
{
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                now = xtime;
        } while (read_seqretry(&xtime_lock, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);
/**
 * current_fs_time - Return FS time
 * @sb: Superblock.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 */
struct timespec current_fs_time(struct super_block *sb)
{
        struct timespec now = current_kernel_time();
        return timespec_trunc(now, sb->s_time_gran);
}
EXPORT_SYMBOL(current_fs_time);
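
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * how a filesystem caller might stamp an inode with fs-granular time.
 * The field names (inode->i_sb, inode->i_mtime) are the usual struct inode
 * members of this era; the granularity values are examples only - 1 for a
 * nanosecond-resolution fs, 1000000000 for one that stores whole seconds.
 *
 *      struct timespec ts = current_fs_time(inode->i_sb);
 *      inode->i_mtime = ts;    // already truncated to sb->s_time_gran
 */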

/*
 * Note: jiffies_to_msecs()/jiffies_to_usecs() are defined here, before
 * timespec_trunc(), so that the "gran <= jiffies_to_usecs(1) * 1000" test
 * in timespec_trunc() compares against a compile-time constant instead of
 * emitting an out-of-line call plus a multiply (optimization by Eric
 * Dumazet); this also lets timespec_trunc() remain a leaf function.
 */
/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int inline jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
        return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
        return (j + (HZ / MSEC_PER_SEC) - 1) / (HZ / MSEC_PER_SEC);
#else
        return (j * MSEC_PER_SEC) / HZ;
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);

unsigned int inline jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
        return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
        return (j + (HZ / USEC_PER_SEC) - 1) / (HZ / USEC_PER_SEC);
#else
        return (j * USEC_PER_SEC) / HZ;
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);
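
/*
 * Worked example (added for illustration, not from the original file):
 * with HZ == 250, MSEC_PER_SEC % HZ == 0, so the first branch applies and
 * jiffies_to_msecs(j) is simply (1000 / 250) * j == 4 * j, while
 * jiffies_to_usecs(1) folds to 4000. With HZ == 1000,
 * jiffies_to_msecs(j) == j and jiffies_to_usecs(j) == 1000 * j.
 */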
/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. gran must be smaller than a second.
 * Always rounds down.
 *
 * This function should be only used for timestamps returned by
 * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
 * it doesn't handle the better resolution of the latter.
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
        /*
         * Division is pretty slow so avoid it for common cases.
         * Currently current_kernel_time() never returns better than
         * jiffies resolution. Exploit that.
         */
        if (gran <= jiffies_to_usecs(1) * 1000) {
                /* nothing */
        } else if (gran == 1000000000) {
                t.tv_nsec = 0;
        } else {
                t.tv_nsec -= t.tv_nsec % gran;
        }
        return t;
}
EXPORT_SYMBOL(timespec_trunc);
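
/*
 * Worked example (illustration only, not from the original file): with a
 * jiffies resolution of 4 ms (HZ == 250), gran == 1000 (1 us) is below
 * jiffies resolution and the timestamp is returned unchanged; gran ==
 * 1000000000 (1 s) zeroes tv_nsec; gran == 10000000 (10 ms) rounds
 * tv_nsec == 53000000 down to 50000000.
 */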

#ifndef CONFIG_GENERIC_TIME
/*
 * Simulate gettimeofday using do_gettimeofday which only allows a timeval
 * and therefore only yields usec accuracy
 */
void getnstimeofday(struct timespec *tv)
{
        struct timeval x;

        do_gettimeofday(&x);
        tv->tv_sec = x.tv_sec;
        tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif

/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * WARNING: this function will overflow on 2106-02-07 06:28:16 on
 * machines where long is 32-bit! (However, as time_t is signed, we
 * will already get problems at other places on 2038-01-19 03:14:08)
 */
unsigned long
mktime(const unsigned int year0, const unsigned int mon0,
       const unsigned int day, const unsigned int hour,
       const unsigned int min, const unsigned int sec)
{
        unsigned int mon = mon0, year = year0;

        /* 1..12 -> 11,12,1..10 */
        if (0 >= (int) (mon -= 2)) {
                mon += 12;      /* Puts Feb last since it has leap day */
                year -= 1;
        }

        return ((((unsigned long)
                  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
                  year*365 - 719499
            )*24 + hour /* now have hours */
          )*60 + min /* now have minutes */
        )*60 + sec; /* finally seconds */
}

EXPORT_SYMBOL(mktime);
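
/*
 * Worked example (added for illustration, not from the original file):
 * mktime(1980, 12, 31, 23, 59, 59). mon -= 2 leaves mon == 10, so no year
 * adjustment. The day count is
 *      1980/4 - 1980/100 + 1980/400 + 367*10/12 + 31 + 1980*365 - 719499
 *      = 495 - 19 + 4 + 305 + 31 + 722700 - 719499 = 4017 days,
 * giving ((4017*24 + 23)*60 + 59)*60 + 59 == 347155199 seconds since
 * 1970-01-01 00:00:00 UTC.
 */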

/**
 * set_normalized_timespec - set timespec sec and nsec parts and normalize
 *
 * @ts:         pointer to timespec variable to be set
 * @sec:        seconds to set
 * @nsec:       nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *      0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
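
/*
 * Worked example (illustration only, not from the original file):
 * set_normalized_timespec(&ts, 5, 1500000000) stores {6, 500000000}, and
 * set_normalized_timespec(&ts, 5, -1) stores {4, 999999999} - the
 * nanosecond part always ends up in [0, NSEC_PER_SEC).
 */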

/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
        struct timespec ts;

        if (!nsec)
                return (struct timespec) {0, 0};

        ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
        if (unlikely(nsec < 0))
                set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec);

/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
        struct timespec ts = ns_to_timespec(nsec);
        struct timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_timeval);
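
/*
 * Worked example (illustration only, not from the original file; assumes
 * div_long_long_rem_signed() truncates toward zero): ns_to_timespec(
 * -1500000000) first yields tv_sec == -1, tv_nsec == -500000000, which
 * set_normalized_timespec() turns into {-2, 500000000}; ns_to_timeval()
 * of the same value then reports tv_sec == -2, tv_usec == 500000.
 */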

/*
 * When we convert to jiffies then we interpret incoming values
 * the following way:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor
 *
 * We must also be careful about 32-bit overflows.
 */
unsigned long msecs_to_jiffies(const unsigned int m)
{
        /*
         * Negative value, means infinite timeout:
         */
        if ((int)m < 0)
                return MAX_JIFFY_OFFSET;

#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
        /*
         * HZ is equal to or smaller than 1000, and 1000 is a nice
         * round multiple of HZ, divide with the factor between them,
         * but round upwards:
         */
        return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
        /*
         * HZ is larger than 1000, and HZ is a nice round multiple of
         * 1000 - simply multiply with the factor between them.
         *
         * But first make sure the multiplication result cannot
         * overflow:
         */
        if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;

        return m * (HZ / MSEC_PER_SEC);
#else
        /*
         * Generic case - multiply, round and divide. But first
         * check that if we are doing a net multiplication, that
         * we wouldn't overflow:
         */
        if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;

        return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
#endif
}
EXPORT_SYMBOL(msecs_to_jiffies);
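
/*
 * Worked example (illustration only, not from the original file): with
 * HZ == 250 the first branch applies, so msecs_to_jiffies(1) returns
 * (1 + 4 - 1) / 4 == 1 jiffy (4 ms) - a 1 ms timeout is rounded up rather
 * than truncated to zero - and msecs_to_jiffies(10) returns 3 jiffies
 * (12 ms). A negative value cast to unsigned int is caught by the
 * (int)m < 0 test and yields MAX_JIFFY_OFFSET, i.e. an 'infinite' timeout.
 */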

unsigned long usecs_to_jiffies(const unsigned int u)
{
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
        return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
        return u * (HZ / USEC_PER_SEC);
#else
        return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
#endif
}
EXPORT_SYMBOL(usecs_to_jiffies);

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
unsigned long
timespec_to_jiffies(const struct timespec *value)
{
        unsigned long sec = value->tv_sec;
        long nsec = value->tv_nsec + TICK_NSEC - 1;

        if (sec >= MAX_SEC_IN_JIFFIES) {
                sec = MAX_SEC_IN_JIFFIES;
                nsec = 0;
        }
        return (((u64)sec * SEC_CONVERSION) +
                (((u64)nsec * NSEC_CONVERSION) >>
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timespec_to_jiffies);
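
/*
 * Worked example (illustration only, not from the original file): the
 * fixed-point math above effectively computes
 * sec * HZ + (tv_nsec + TICK_NSEC - 1) / TICK_NSEC. With HZ == 250
 * (TICK_NSEC == 4000000), a timespec of {0, 1000000} (1 ms) rounds up to
 * 1 jiffy, while {1, 0} gives 250 jiffies.
 */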

void
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u64 nsec = (u64)jiffies * TICK_NSEC;

        value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
}
EXPORT_SYMBOL(jiffies_to_timespec);

/* Same for "timeval"
 *
 * Well, almost.  The problem here is that the real system resolution is
 * in nanoseconds and the value being converted is in micro seconds.
 * Also for some machines (those that use HZ = 1024, in-particular),
 * there is a LARGE error in the tick size in microseconds.
 *
 * The solution we use is to do the rounding AFTER we convert the
 * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
 * Instruction wise, this should cost only an additional add with carry
 * instruction above the way it was done above.
 */
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
        unsigned long sec = value->tv_sec;
        long usec = value->tv_usec;

        if (sec >= MAX_SEC_IN_JIFFIES) {
                sec = MAX_SEC_IN_JIFFIES;
                usec = 0;
        }
        return (((u64)sec * SEC_CONVERSION) +
                (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
                 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timeval_to_jiffies);

void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u64 nsec = (u64)jiffies * TICK_NSEC;
        long tv_usec;

        value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
        tv_usec /= NSEC_PER_USEC;
        value->tv_usec = tv_usec;
}
EXPORT_SYMBOL(jiffies_to_timeval);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
        return x / (HZ / USER_HZ);
#else
        u64 tmp = (u64)x * TICK_NSEC;
        do_div(tmp, (NSEC_PER_SEC / USER_HZ));
        return (long)tmp;
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);

unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
        if (x >= ~0UL / (HZ / USER_HZ))
                return ~0UL;
        return x * (HZ / USER_HZ);
#else
        u64 jif;

        /* Don't worry about loss of precision here .. */
        if (x >= ~0UL / HZ * USER_HZ)
                return ~0UL;

        /* .. but do try to contain it here */
        jif = x * (u64) HZ;
        do_div(jif, USER_HZ);
        return jif;
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);
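
/*
 * Worked example (illustration only, not from the original file): with
 * HZ == 250 and USER_HZ == 100, TICK_NSEC (4000000) is not a multiple of
 * NSEC_PER_SEC / USER_HZ (10000000), so jiffies_to_clock_t() takes the
 * #else path: 250 jiffies * 4000000 ns == 1000000000 ns, divided by
 * 10000000 ns per user tick == 100 clock ticks, i.e. one second either
 * way. clock_t_to_jiffies() likewise uses its #else path (HZ % USER_HZ
 * != 0) and maps 100 back to 250 jiffies.
 */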

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
        do_div(x, HZ / USER_HZ);
#else
        /*
         * There are better ways that don't overflow early,
         * but even this doesn't overflow in hundreds of years
         * in 64 bits, so..
         */
        x *= TICK_NSEC;
        do_div(x, (NSEC_PER_SEC / USER_HZ));
#endif
        return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
        do_div(x, (NSEC_PER_SEC / USER_HZ));
#elif (USER_HZ % 512) == 0
        x *= USER_HZ / 512;
        do_div(x, (NSEC_PER_SEC / 512));
#else
        /*
         * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
         * overflow after 64.99 years.
         * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
         */
        x *= 9;
        do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ / 2)) /
                                  USER_HZ));
#endif
        return x;
}
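
/*
 * Worked example (illustration only, not from the original file): for
 * USER_HZ == 60 the generic branch applies; the divisor is
 * (9 * 1000000000 + 30) / 60 == 150000000, so nsec_to_clock_t(1000000000)
 * == 9000000000 / 150000000 == 60 ticks - exact for HZ=60, as the comment
 * above notes. Multiplying by 9 first makes 9 * NSEC_PER_SEC an exact
 * multiple of those USER_HZ values (60, 72, 90, ...), which is what keeps
 * the relative error small.
 */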

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
        unsigned long seq;
        u64 ret;

        do {
                seq = read_seqbegin(&xtime_lock);
                ret = jiffies_64;
        } while (read_seqretry(&xtime_lock, seq));
        return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
#endif

EXPORT_SYMBOL(jiffies);