/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et. al.
 *
 *  For licencing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>

#include <asm/uaccess.h>
/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
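
/*
 * Usage sketch (illustrative, not part of this file): both accessors
 * return a ktime_t, so the usual ktime helpers apply to the result.
 * The 500us deadline below is an assumption for the example.
 *
 *	ktime_t mono = ktime_get();
 *	ktime_t wall = ktime_get_real();
 *	ktime_t deadline = ktime_add_ns(mono, 500 * NSEC_PER_USEC);
 *
 * CLOCK_MONOTONIC (ktime_get) is the appropriate base for timeouts,
 * since it is not affected by settimeofday() adjustments to the wall
 * clock.
 */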
/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = current_kernel_time();
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Helper function to check whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_CALLBACK;
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;

	new_cpu_base = &__get_cpu_var(hrtimer_bases);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

#define switch_hrtimer_base(t, b)	(b)

#endif	/* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64

# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);
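
/*
 * Worked example (illustrative): adding nsec = 2500000000 (2.5s) takes
 * the slow path because it exceeds NSEC_PER_SEC. do_div() leaves
 * nsec = 2 (seconds) with rem = 500000000, so tmp becomes the
 * normalized ktime_set(2, 500000000) before the final ktime_add().
 */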

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}
EXPORT_SYMBOL_GPL(ktime_sub_ns);

# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
unsigned long ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc, inc, dns;
	int sft = 0;

	dclc = dns = ktime_to_ns(kt);
	inc = div;
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return (unsigned long) dclc;
}

#endif /* BITS_PER_LONG < 64 */
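
/*
 * Worked example (illustrative): for div = 2^33 the loop above shifts
 * twice (sft = 2) until div = 2^31 fits in 32 bits. dclc is shifted
 * right by the same two bits, so the quotient is preserved up to
 * rounding and do_div() only has to perform a cheap 64/32 bit division.
 */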

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);
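
/*
 * Usage note (illustrative): high resolution mode can be disabled at
 * boot by appending "highres=off" to the kernel command line;
 * "highres=on" restores the default.
 */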

/*
 * hrtimer_is_hres_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires;

	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(timer->expires, base->offset);
		if (expires.tv64 < cpu_base->expires_next.tv64)
			cpu_base->expires_next = expires;
	}

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(timer->expires, base->offset);
	int res;

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME, when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;

	return res;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base);
	spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 0, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ON_ONCE(num_online_cpus() > 1);

	/* Retrigger the CPU local events: */
	retrigger_next_event(NULL);
}

/*
 * Check whether the timer is on the callback pending list
 */
static inline int hrtimer_cb_pending(const struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_PENDING;
}

/*
 * Remove a timer from the callback pending list
 */
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
{
	list_del_init(&timer->cb_entry);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
	INIT_LIST_HEAD(&base->cb_pending);
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
	INIT_LIST_HEAD(&timer->cb_entry);
}

/*
 * When high resolution timers are active, try to reprogram. Note that in
 * case the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no
 * expiry check happens. The timer gets enqueued into the rbtree. The
 * reprogramming and expiry check is done in the hrtimer_interrupt or in
 * the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

		/* Timer is expired, act upon the callback mode */
		switch(timer->cb_mode) {
		case HRTIMER_CB_IRQSAFE_NO_RESTART:
			/*
			 * We can call the callback from here. No restart
			 * happens, so no danger of recursion
			 */
			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
			return 1;
		case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
			/*
			 * This is solely for the sched tick emulation with
			 * dynamic tick support to ensure that we do not
			 * restart the tick right on the edge and end up with
			 * the tick timer in the softirq! The calling site
			 * takes care of this.
			 */
			return 1;
		case HRTIMER_CB_IRQSAFE:
		case HRTIMER_CB_SOFTIRQ:
			/*
			 * Move everything else into the softirq pending list!
			 */
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			timer->state = HRTIMER_STATE_PENDING;
			raise_softirq(HRTIMER_SOFTIRQ);
			return 1;
		default:
			BUG();
		}
	}
	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline int hrtimer_cb_pending(struct hrtimer *timer) { return 0; }
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) { }
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	unsigned long orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);
	/*
	 * Make sure that the result did not wrap with a very large
	 * interval.
	 */
	if (timer->expires.tv64 < 0)
		timer->expires = ktime_set(KTIME_SEC_MAX, 0);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
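
/*
 * Usage sketch (illustrative): a periodic timer typically calls
 * hrtimer_forward() from its callback and returns HRTIMER_RESTART.
 * The callback name and the 10ms period are assumptions for the
 * example, not part of this file.
 *
 *	static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
 *	{
 *		ktime_t period = ktime_set(0, 10 * NSEC_PER_MSEC);
 *
 *		hrtimer_forward(t, t->base->get_time(), period);
 *		return HRTIMER_RESTART;
 *	}
 *
 * hrtimer_forward() skips all missed periods in one go and returns the
 * overrun count, so a late callback does not fire repeatedly to catch
 * up.
 */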

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base, int reprogram)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost) {
		/*
		 * Reprogram the clock event device. When the timer is already
		 * expired hrtimer_enqueue_reprogram has either called the
		 * callback or added it to the pending list and raised the
		 * softirq.
		 *
		 * This is a NOP for !HIGHRES
		 */
		if (reprogram && hrtimer_enqueue_reprogram(timer, base))
			return;

		base->first = &timer->node;
	}

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by
 * setting reprogram to zero. This is useful when the context does a
 * reprogramming anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	/* High res. callback list. NOP for !HIGHRES */
	if (hrtimer_cb_pending(timer))
		hrtimer_remove_cb_pending(timer);
	else {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}

/**
 * hrtimer_start - (re)start a relative timer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		tim = ktime_add(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add(tim, base->resolution);
#endif
		/*
		 * Careful here: User space might have asked for a
		 * very long sleep, so the add above might result in a
		 * negative number, which enqueues the timer in front
		 * of the queue.
		 */
		if (tim.tv64 < 0)
			tim.tv64 = KTIME_MAX;
	}
	timer->expires = tim;
	timer_stats_hrtimer_set_start_info(timer);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 */
	enqueue_hrtimer(timer, new_base,
			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);
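
/*
 * Usage sketch (illustrative): arming a one-shot relative timer. The
 * timer variable, callback name and 100ms timeout are assumptions for
 * the example, not part of this file.
 *
 *	static struct hrtimer my_timer;
 *
 *	static enum hrtimer_restart my_timeout_cb(struct hrtimer *t)
 *	{
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_timeout_cb;
 *	hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
 *		      HRTIMER_MODE_REL);
 *
 * A relative expiry is converted to absolute time internally by adding
 * new_base->get_time(), so the rbtree always orders absolute expiries.
 */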

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
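
/*
 * Design note: hrtimer_cancel() simply spins on hrtimer_try_to_cancel()
 * until the callback has finished (the -1 case). A context which might
 * itself be interrupting that callback on the same CPU must use
 * hrtimer_try_to_cancel() and handle the -1 return instead, or it would
 * spin forever.
 */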

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = timer->expires.tv64;
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;

	return mindelta;
}
#endif

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	hrtimer_init_timer_hres(timer);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			if (basenow.tv64 < timer->expires.tv64) {
				ktime_t expires;

				expires = ktime_sub(timer->expires,
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__remove_hrtimer(timer, base,
					 HRTIMER_STATE_CALLBACK, 0);
			timer_stats_account_hrtimer(timer);

			/*
			 * Note: We clear the CALLBACK bit after
			 * enqueue_hrtimer to avoid reprogramming of
			 * the event hardware. This happens at the end
			 * of this function anyway.
			 */
			if (timer->function(timer) != HRTIMER_NORESTART) {
				BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
				enqueue_hrtimer(timer, base, 0);
			}
			timer->state &= ~HRTIMER_STATE_CALLBACK;
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}
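
/*
 * Design note on the retry loop above: timers can become ready while
 * the queues are being walked. If tick_program_event() reports that
 * expires_next is already in the past by the time the device is
 * reprogrammed, the whole expiry walk is repeated with a fresh
 * ktime_get() rather than arming an event in the past.
 */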

static void run_hrtimer_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	spin_lock_irq(&cpu_base->lock);

	while (!list_empty(&cpu_base->cb_pending)) {
		enum hrtimer_restart (*fn)(struct hrtimer *);
		struct hrtimer *timer;
		int restart;

		timer = list_entry(cpu_base->cb_pending.next,
				   struct hrtimer, cb_entry);
		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart == HRTIMER_RESTART) {
			BUG_ON(hrtimer_active(timer));
			/*
			 * Enqueue the timer, allow reprogramming of the event
			 * device
			 */
			enqueue_hrtimer(timer, timer->base, 1);
		} else if (hrtimer_active(timer)) {
			/*
			 * If the timer was rearmed on another CPU, reprogram
			 * the event device.
			 */
			if (timer->base->first == &timer->node)
				hrtimer_reprogram(timer, timer->base);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}

#endif	/* CONFIG_HIGH_RES_TIMERS */

/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
				     int index)
{
	struct rb_node *node;
	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

	if (!base->first)
		return;

	if (base->get_softirq_time)
		base->softirq_time = base->get_softirq_time();

	spin_lock_irq(&cpu_base->lock);

	while ((node = base->first)) {
		struct hrtimer *timer;
		enum hrtimer_restart (*fn)(struct hrtimer *);
		int restart;

		timer = rb_entry(node, struct hrtimer, node);
		if (base->softirq_time.tv64 <= timer->expires.tv64)
			break;

#ifdef CONFIG_HIGH_RES_TIMERS
		WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
#endif
		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart != HRTIMER_NORESTART) {
			BUG_ON(hrtimer_active(timer));
			enqueue_hrtimer(timer, base, 0);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	int i;

	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and/or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		if (hrtimer_switch_to_hres())
			return;

	hrtimer_get_softirq_time(cpu_base);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		run_hrtimer_queue(cpu_base, i);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
#endif
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t->timer, t->timer.expires, mode);

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	return t->task == NULL;
}
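
/*
 * Design note: do_nanosleep() avoids a lost wakeup by setting
 * TASK_INTERRUPTIBLE before arming the timer. If hrtimer_wakeup()
 * fires in between, it clears t->task and wake_up_process() fixes up
 * the task state, so the t->task check skips the schedule() call.
 */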

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec *rmtp;
	ktime_t time;

	restart->fn = do_no_restart_syscall;

	hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
	t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		return 0;

	rmtp = (struct timespec *)restart->arg1;
	if (rmtp) {
		time = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (time.tv64 <= 0)
			return 0;
		*rmtp = ktime_to_timespec(time);
	}

	restart->fn = hrtimer_nanosleep_restart;

	/* The other values in restart are already filled in */
	return -ERESTART_RESTARTBLOCK;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	ktime_t rem;

	hrtimer_init(&t.timer, clockid, mode);
	t.timer.expires = timespec_to_ktime(*rqtp);
	if (do_nanosleep(&t, mode))
		return 0;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS)
		return -ERESTARTNOHAND;

	if (rmtp) {
		rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (rem.tv64 <= 0)
			return 0;
		*rmtp = ktime_to_timespec(rem);
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->arg0 = (unsigned long) t.timer.base->index;
	restart->arg1 = (unsigned long) rmtp;
	restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
	restart->arg3 = t.timer.expires.tv64 >> 32;

	return -ERESTART_RESTARTBLOCK;
}
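
/*
 * Design note: restart_block carries only unsigned longs, so the 64 bit
 * absolute expiry is split into two 32 bit halves (arg2/arg3) here and
 * reassembled in hrtimer_nanosleep_restart() above. The restart path
 * always runs in HRTIMER_MODE_ABS, so a restarted sleep does not drift.
 */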

asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu, rmt;
	int ret;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	ret = hrtimer_nanosleep(&tu, rmtp ? &rmt : NULL, HRTIMER_MODE_REL,
				CLOCK_MONOTONIC);
	if (ret && rmtp) {
		if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
			return -EFAULT;
	}

	return ret;
}

/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);
	lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timer. Allow reprogramming of the event device
		 */
		enqueue_hrtimer(timer, new_base, 1);
	}
}

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	tick_cancel_sched_timer(cpu);

	local_irq_disable();
	double_spin_lock(&new_base->lock, &old_base->lock,
			 smp_processor_id() < cpu);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	double_spin_unlock(&new_base->lock, &old_base->lock,
			   smp_processor_id() < cpu);
	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL);
#endif
}