/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006       Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et. al.
 *
 *  For licencing details see kernel-base/COPYING
 */
# include <linux/cpu.h>
# include <linux/module.h>
# include <linux/percpu.h>
# include <linux/hrtimer.h>
# include <linux/notifier.h>
# include <linux/syscalls.h>
# include <linux/interrupt.h>
# include <asm/uaccess.h>
/**
* ktime_get - get the monotonic time in ktime_t format
*
* returns the time in ktime_t format
*/
2007-02-16 12:28:00 +03:00
ktime_t ktime_get ( void )
2006-01-10 07:52:32 +03:00
{
struct timespec now ;
ktime_get_ts ( & now ) ;
return timespec_to_ktime ( now ) ;
}
/**
* ktime_get_real - get the real ( wall - ) time in ktime_t format
*
* returns the time in ktime_t format
*/
2007-02-16 12:28:00 +03:00
ktime_t ktime_get_real ( void )
2006-01-10 07:52:32 +03:00
{
struct timespec now ;
getnstimeofday ( & now ) ;
return timespec_to_ktime ( now ) ;
}
EXPORT_SYMBOL_GPL ( ktime_get_real ) ;
/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
/* Per-cpu base; clock_base[] is indexed directly by clock id. */
static DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_REALTIME_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_MONOTONIC_RES,
		},
	}
};
/**
* ktime_get_ts - get the monotonic clock in timespec format
* @ ts : pointer to timespec variable
*
* The function calculates the monotonic clock from the realtime
* clock and the wall_to_monotonic offset and stores the result
2007-02-10 12:45:59 +03:00
* in normalized timespec format in the variable pointed to by @ ts .
2006-01-10 07:52:32 +03:00
*/
void ktime_get_ts ( struct timespec * ts )
{
struct timespec tomono ;
unsigned long seq ;
do {
seq = read_seqbegin ( & xtime_lock ) ;
getnstimeofday ( ts ) ;
tomono = wall_to_monotonic ;
} while ( read_seqretry ( & xtime_lock , seq ) ) ;
set_normalized_timespec ( ts , ts - > tv_sec + tomono . tv_sec ,
ts - > tv_nsec + tomono . tv_nsec ) ;
}
2006-01-10 07:52:39 +03:00
EXPORT_SYMBOL_GPL ( ktime_get_ts ) ;
2006-01-10 07:52:32 +03:00
2006-03-26 13:38:05 +04:00
/*
* Get the coarse grained time at the softirq based on xtime and
* wall_to_monotonic .
*/
2007-02-16 12:27:50 +03:00
static void hrtimer_get_softirq_time ( struct hrtimer_cpu_base * base )
2006-03-26 13:38:05 +04:00
{
ktime_t xtim , tomono ;
2007-02-16 12:27:26 +03:00
struct timespec xts ;
2006-03-26 13:38:05 +04:00
unsigned long seq ;
do {
seq = read_seqbegin ( & xtime_lock ) ;
2007-02-16 12:27:26 +03:00
# ifdef CONFIG_NO_HZ
getnstimeofday ( & xts ) ;
# else
xts = xtime ;
# endif
2006-03-26 13:38:05 +04:00
} while ( read_seqretry ( & xtime_lock , seq ) ) ;
2007-02-16 12:27:26 +03:00
xtim = timespec_to_ktime ( xts ) ;
tomono = timespec_to_ktime ( wall_to_monotonic ) ;
2007-02-16 12:27:50 +03:00
base - > clock_base [ CLOCK_REALTIME ] . softirq_time = xtim ;
base - > clock_base [ CLOCK_MONOTONIC ] . softirq_time =
ktime_add ( xtim , tomono ) ;
2006-03-26 13:38:05 +04:00
}
/*
 * Helper function to check, whether the timer is on one of the queues
 */
static inline int hrtimer_is_queued(struct hrtimer *timer)
{
	/* Nonzero iff the timer sits in a clock base's rbtree */
	return timer->state & HRTIMER_STATE_ENQUEUED;
}
/*
 * Helper function to check, whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	/* Nonzero while the expiry function is executing (base lock dropped) */
	return timer->state & HRTIMER_STATE_CALLBACK;
}
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			/* Recheck under the lock: base may have changed meanwhile */
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		/* base == NULL: migration in flight, spin until it settles */
		cpu_relax();
	}
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;

	new_cpu_base = &__get_cpu_var(hrtimer_bases);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(timer->state & HRTIMER_STATE_CALLBACK))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

/* UP: there is only one base, no migration, no recheck loop needed. */
static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

#define switch_hrtimer_base(t, b)	(b)

#endif	/* !CONFIG_SMP */
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		/* Split nsec into seconds + remainder for ktime_set() */
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long) nsec, rem);
	}

	return ktime_add(kt, tmp);
}
# else /* CONFIG_KTIME_SCALAR */
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 *
 * NOTE(review): dclc is unsigned, so this assumes kt >= 0; the caller
 * in hrtimer_forward() only invokes it after checking delta >= 0.
 */
static unsigned long ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc, inc, dns;
	int sft = 0;

	dclc = dns = ktime_to_ns(kt);
	inc = div;
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	/* Scale the dividend by the same amount to keep the quotient */
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return (unsigned long) dclc;
}

#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div)		(unsigned long)((kt).tv64 / (div))
#endif /* BITS_PER_LONG >= 64 */
/*
 * Counterpart to lock_timer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	/* timer->base cannot change while we hold the base lock */
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	unsigned long orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	/* Not expired yet: nothing to forward */
	if (delta.tv64 < 0)
		return 0;

	/* Clamp the interval to the clock resolution so we always advance */
	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		/* Missed more than one period: jump by orun intervals at once */
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);

	return orun;
}
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We dont care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);

	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	/* Keep base->first caching the earliest-expiring node */
	if (!base->first || timer->expires.tv64 <
	    rb_entry(base->first, struct hrtimer, node)->expires.tv64)
		base->first = &timer->node;
}
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate)
{
	/*
	 * Remove the timer from the rbtree and replace the
	 * first entry pointer if necessary.
	 */
	if (base->first == &timer->node)
		base->first = rb_next(&timer->node);
	rb_erase(&timer->node, &base->active);

	/* @newstate may keep HRTIMER_STATE_CALLBACK set for a running timer */
	timer->state = newstate;
}
/*
* remove hrtimer , called with base lock held
*/
static inline int
2007-02-16 12:27:50 +03:00
remove_hrtimer ( struct hrtimer * timer , struct hrtimer_clock_base * base )
2006-01-10 07:52:32 +03:00
{
2007-02-16 12:27:51 +03:00
if ( hrtimer_is_queued ( timer ) ) {
__remove_hrtimer ( timer , base , HRTIMER_STATE_INACTIVE ) ;
2006-01-10 07:52:32 +03:00
return 1 ;
}
return 0 ;
}
/**
 * hrtimer_start - (re)start an relative timer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		/* Convert the relative expiry into an absolute time */
		tim = ktime_add(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add(tim, base->resolution);
#endif
	}
	timer->expires = tim;

	enqueue_hrtimer(timer, new_base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	/* A running callback cannot be removed; report -1 in that case */
	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	int ret;

	/* Spin while the callback is running (try_to_cancel returns -1). */
	while ((ret = hrtimer_try_to_cancel(timer)) < 0)
		cpu_relax();

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
/**
* hrtimer_get_remaining - get remaining time for the timer
* @ timer : the timer to read
*/
ktime_t hrtimer_get_remaining ( const struct hrtimer * timer )
{
2007-02-16 12:27:50 +03:00
struct hrtimer_clock_base * base ;
2006-01-10 07:52:32 +03:00
unsigned long flags ;
ktime_t rem ;
base = lock_hrtimer_base ( timer , & flags ) ;
2007-02-16 12:27:50 +03:00
rem = ktime_sub ( timer - > expires , base - > get_time ( ) ) ;
2006-01-10 07:52:32 +03:00
unlock_hrtimer_base ( timer , & flags ) ;
return rem ;
}
2006-05-31 08:26:09 +04:00
EXPORT_SYMBOL_GPL ( hrtimer_get_remaining ) ;
2006-01-10 07:52:32 +03:00
#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		/* base->first caches the earliest-expiring timer */
		if (!base->first)
			continue;

		timer = rb_entry(base->first, struct hrtimer, node);
		delta.tv64 = timer->expires.tv64;
		delta = ktime_sub(delta, base->get_time());
		if (delta.tv64 < mindelta.tv64)
			mindelta.tv64 = delta.tv64;
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	/* An already-expired timer maps to "expire now" */
	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;

	return mindelta;
}
#endif
/**
2006-02-01 14:05:11 +03:00
* hrtimer_init - initialize a timer to the given clock
* @ timer : the timer to be initialized
2006-01-10 07:52:32 +03:00
* @ clock_id : the clock to be used
2006-02-01 14:05:11 +03:00
* @ mode : timer mode abs / rel
2006-01-10 07:52:32 +03:00
*/
2006-02-01 14:05:11 +03:00
void hrtimer_init ( struct hrtimer * timer , clockid_t clock_id ,
enum hrtimer_mode mode )
2006-01-10 07:52:32 +03:00
{
2007-02-16 12:27:50 +03:00
struct hrtimer_cpu_base * cpu_base ;
2006-01-10 07:52:32 +03:00
2006-02-01 14:05:11 +03:00
memset ( timer , 0 , sizeof ( struct hrtimer ) ) ;
2007-02-16 12:27:50 +03:00
cpu_base = & __raw_get_cpu_var ( hrtimer_bases ) ;
2006-01-10 07:52:32 +03:00
2007-02-16 12:27:49 +03:00
if ( clock_id = = CLOCK_REALTIME & & mode ! = HRTIMER_MODE_ABS )
2006-02-01 14:05:11 +03:00
clock_id = CLOCK_MONOTONIC ;
2007-02-16 12:27:50 +03:00
timer - > base = & cpu_base - > clock_base [ clock_id ] ;
2006-01-10 07:52:32 +03:00
}
2006-05-31 08:26:09 +04:00
EXPORT_SYMBOL_GPL ( hrtimer_init ) ;
2006-01-10 07:52:32 +03:00
/**
* hrtimer_get_res - get the timer resolution for a clock
* @ which_clock : which clock to query
* @ tp : pointer to timespec variable to store the resolution
*
2007-02-10 12:45:59 +03:00
* Store the resolution of the clock selected by @ which_clock in the
* variable pointed to by @ tp .
2006-01-10 07:52:32 +03:00
*/
int hrtimer_get_res ( const clockid_t which_clock , struct timespec * tp )
{
2007-02-16 12:27:50 +03:00
struct hrtimer_cpu_base * cpu_base ;
2006-01-10 07:52:32 +03:00
2007-02-16 12:27:50 +03:00
cpu_base = & __raw_get_cpu_var ( hrtimer_bases ) ;
* tp = ktime_to_timespec ( cpu_base - > clock_base [ which_clock ] . resolution ) ;
2006-01-10 07:52:32 +03:00
return 0 ;
}
2006-05-31 08:26:09 +04:00
EXPORT_SYMBOL_GPL ( hrtimer_get_res ) ;
2006-01-10 07:52:32 +03:00
/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
				     int index)
{
	struct rb_node *node;
	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

	/* Fast path: nothing queued on this clock base */
	if (!base->first)
		return;

	/* Let a base-specific time source override the cached softirq time */
	if (base->get_softirq_time)
		base->softirq_time = base->get_softirq_time();

	spin_lock_irq(&cpu_base->lock);

	while ((node = base->first)) {
		struct hrtimer *timer;
		enum hrtimer_restart (*fn)(struct hrtimer *);
		int restart;

		timer = rb_entry(node, struct hrtimer, node);

		/* base->first is the earliest timer; stop once it is in the future */
		if (base->softirq_time.tv64 <= timer->expires.tv64)
			break;

		fn = timer->function;
		/* Dequeue but mark CALLBACK so concurrent starts keep the base */
		__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK);
		/* Drop the lock: the callback may (re)arm or cancel timers */
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart != HRTIMER_NORESTART) {
			/* The callback must not have re-armed the timer itself */
			BUG_ON(hrtimer_active(timer));
			enqueue_hrtimer(timer, base);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}
/*
* Called from timer softirq every jiffy , expire hrtimers :
*/
void hrtimer_run_queues ( void )
{
2007-02-16 12:27:50 +03:00
struct hrtimer_cpu_base * cpu_base = & __get_cpu_var ( hrtimer_bases ) ;
2006-01-10 07:52:32 +03:00
int i ;
2007-02-16 12:27:50 +03:00
hrtimer_get_softirq_time ( cpu_base ) ;
2006-03-26 13:38:05 +04:00
2007-02-16 12:27:50 +03:00
for ( i = 0 ; i < HRTIMER_MAX_CLOCK_BASES ; i + + )
run_hrtimer_queue ( cpu_base , i ) ;
2006-01-10 07:52:32 +03:00
}
2006-01-10 07:52:35 +03:00
/*
* Sleep related functions :
*/
2007-02-16 12:27:49 +03:00
static enum hrtimer_restart hrtimer_wakeup ( struct hrtimer * timer )
2006-03-31 14:31:17 +04:00
{
struct hrtimer_sleeper * t =
container_of ( timer , struct hrtimer_sleeper , timer ) ;
struct task_struct * task = t - > task ;
t - > task = NULL ;
if ( task )
wake_up_process ( task ) ;
return HRTIMER_NORESTART ;
}
2006-07-03 11:25:41 +04:00
void hrtimer_init_sleeper ( struct hrtimer_sleeper * sl , struct task_struct * task )
2006-03-31 14:31:17 +04:00
{
sl - > timer . function = hrtimer_wakeup ;
sl - > task = task ;
}
2006-03-31 14:31:19 +04:00
/*
 * Sleep until the sleeper's timer expires or a signal arrives.
 * Returns nonzero when the full sleep completed (t->task was cleared
 * by hrtimer_wakeup()), zero when interrupted by a signal.
 */
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		/* Set task state before arming the timer to avoid a lost wakeup */
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t->timer, t->timer.expires, mode);

		schedule();

		hrtimer_cancel(&t->timer);
		/* After the first pass the expiry is absolute (set by hrtimer_start) */
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	return t->task == NULL;
}
/*
 * Restart handler for an interrupted hrtimer_nanosleep(): rebuild the
 * absolute-mode sleeper from the restart block and resume sleeping.
 */
long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	struct timespec tu;
	ktime_t time;

	/* Default to no further restart unless we decide otherwise below */
	restart->fn = do_no_restart_syscall;

	/* arg0: clock id, arg2/arg3: low/high 32 bits of the absolute expiry */
	hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
	t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		return 0;

	rmtp = (struct timespec __user *) restart->arg1;
	if (rmtp) {
		/* Report the remaining sleep time to user space */
		time = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (time.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(time);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart->fn = hrtimer_nanosleep_restart;

	/* The other values in restart are already filled in */
	return -ERESTART_RESTARTBLOCK;
}
/*
 * Common nanosleep implementation: sleep for *rqtp on the given clock.
 * On signal interruption of a relative sleep, store the remaining time
 * in *rmtp (if non-NULL) and set up the restart block.
 */
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	struct timespec tu;
	ktime_t rem;

	hrtimer_init(&t.timer, clockid, mode);
	t.timer.expires = timespec_to_ktime(*rqtp);
	if (do_nanosleep(&t, mode))
		return 0;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS)
		return -ERESTARTNOHAND;

	if (rmtp) {
		/* Report the remaining sleep time to user space */
		rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (rem.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(rem);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart = &current_thread_info()->restart_block;
	/* Pack clock id, rmtp pointer and 64-bit expiry for the restart path */
	restart->fn = hrtimer_nanosleep_restart;
	restart->arg0 = (unsigned long) t.timer.base->index;
	restart->arg1 = (unsigned long) rmtp;
	restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
	restart->arg3 = t.timer.expires.tv64 >> 32;

	return -ERESTART_RESTARTBLOCK;
}
/*
 * nanosleep(2): sleep for the user-supplied relative interval on
 * CLOCK_MONOTONIC.
 */
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec req;

	if (copy_from_user(&req, rqtp, sizeof(req)))
		return -EFAULT;

	if (!timespec_valid(&req))
		return -EINVAL;

	return hrtimer_nanosleep(&req, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);
	lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);

	/* Give each clock base a back-pointer to its per-cpu base */
	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Move all timers queued on @old_base over to @new_base.
 * Called with both cpu base locks held (see migrate_hrtimers()).
 */
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		/* The source cpu is offline, so no callback can be running */
		BUG_ON(timer->state & HRTIMER_STATE_CALLBACK);
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE);
		timer->base = new_base;
		enqueue_hrtimer(timer, new_base);
	}
}
/*
 * Pull all hrtimers of the dead @cpu over to the current cpu's bases.
 */
static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	local_irq_disable();

	/*
	 * NOTE(review): there is no global ordering between the two cpu base
	 * locks here; presumably safe because @cpu is offline (BUG_ON above)
	 * so nothing else locks both bases concurrently - confirm.
	 */
	spin_lock(&new_base->lock);
	spin_lock(&old_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}
	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);

	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}

#endif /* CONFIG_HOTPLUG_CPU */
/*
 * CPU hotplug callback: initialize per-cpu bases when a cpu comes up,
 * migrate its timers away when it dies.
 */
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		/* Tell the clockevents layer first, then pull the timers over */
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}
/* CPU hotplug notifier: wires hrtimer_cpu_notify() into the cpu chain */
static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};
void __init hrtimers_init(void)
{
	/* Initialize the boot cpu directly; later cpus go through the notifier */
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			  (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
}