/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>

static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static inline __u64 get_vtimer(void)
{
	__u64 timer;

	asm volatile("STPT %0" : "=m" (timer));
	return timer;
}

static inline void set_vtimer(__u64 expires)
{
	__u64 timer;

	asm volatile("	STPT %0\n"	/* Store current cpu timer value */
		     "	SPT %1"	/* Set new value immediately afterwards */
		     : "=m" (timer) : "m" (expires));
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}
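
/*
 * Worked example for set_vtimer() with made-up numbers: the CPU timer
 * counts down while the cpu runs, so if last_update_timer was programmed
 * with 1000 ticks and STPT now reads 400 ticks remaining, then
 * 1000 - 400 = 600 ticks of system time have elapsed. Those 600 ticks are
 * added to S390_lowcore.system_timer before the new expiry value is
 * programmed and remembered in last_update_timer.
 */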

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 timer, clock, user, system, steal;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile("	STPT %0\n"	/* Store current cpu timer value */
		     "	STCK %1"	/* Store current tod clock value */
		     : "=m" (S390_lowcore.last_update_timer),
		       "=m" (S390_lowcore.last_update_clock));
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;
	account_user_time(tsk, user, user);

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, hardirq_offset, system, system);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}
}

void account_vtime(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(next);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

void account_process_tick(struct task_struct *tsk, int user_tick)
{
	do_account_vtime(tsk, HARDIRQ_OFFSET);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 timer, system;

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, 0, system, system);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
	__u64 idle_time, expires;

	if (idle->idle_enter == 0ULL)
		return;

	/* Account time spent with enabled wait psw loaded as idle time. */
	idle_time = int_clock - idle->idle_enter;
	account_idle_time(idle_time);
	S390_lowcore.steal_timer +=
		idle->idle_enter - S390_lowcore.last_update_clock;
	S390_lowcore.last_update_clock = int_clock;

	/* Account system time spent going idle. */
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
	S390_lowcore.last_update_timer = enter_timer;

	/* Restart vtime CPU timer */
	if (vq->do_spt) {
		/* Program old expire value but first save progress. */
		expires = vq->idle - enter_timer;
		expires += get_vtimer();
		set_vtimer(expires);
	} else {
		/* Don't account the CPU timer delta while the cpu was idle. */
		vq->elapsed -= vq->idle - enter_timer;
	}

	idle->sequence++;
	smp_wmb();
	idle->idle_time += idle_time;
	idle->idle_enter = 0ULL;
	idle->idle_count++;
	smp_wmb();
	idle->sequence++;
}

void vtime_stop_cpu(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
	psw_t psw;

	/* Wait for external, I/O or machine check interrupt. */
	psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;

	idle->nohz_delay = 0;

	/* Check if the CPU timer needs to be reprogrammed. */
	if (vq->do_spt) {
		__u64 vmax = VTIMER_MAX_SLICE;
		/*
		 * The inline assembly is equivalent to
		 *	vq->idle = get_cpu_timer();
		 *	set_cpu_timer(VTIMER_MAX_SLICE);
		 *	idle->idle_enter = get_clock();
		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
		 *			PSW_MASK_IO | PSW_MASK_EXT);
		 * The difference is that the inline assembly makes sure that
		 * the last three instructions are stpt, stck and lpsw in that
		 * order. This is done to increase the precision.
		 */
		asm volatile(
#ifndef CONFIG_64BIT
			"	basr	1,0\n"
			"0:	ahi	1,1f-0b\n"
			"	st	1,4(%2)\n"
#else /* CONFIG_64BIT */
			"	larl	1,1f\n"
			"	stg	1,8(%2)\n"
#endif /* CONFIG_64BIT */
			"	stpt	0(%4)\n"
			"	spt	0(%5)\n"
			"	stck	0(%3)\n"
#ifndef CONFIG_64BIT
			"	lpsw	0(%2)\n"
#else /* CONFIG_64BIT */
			"	lpswe	0(%2)\n"
#endif /* CONFIG_64BIT */
			"1:"
			: "=m" (idle->idle_enter), "=m" (vq->idle)
			: "a" (&psw), "a" (&idle->idle_enter),
			  "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
			: "memory", "cc", "1");
	} else {
		/*
		 * The inline assembly is equivalent to
		 *	vq->idle = get_cpu_timer();
		 *	idle->idle_enter = get_clock();
		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
		 *			PSW_MASK_IO | PSW_MASK_EXT);
		 * The difference is that the inline assembly makes sure that
		 * the last three instructions are stpt, stck and lpsw in that
		 * order. This is done to increase the precision.
		 */
		asm volatile(
#ifndef CONFIG_64BIT
			"	basr	1,0\n"
			"0:	ahi	1,1f-0b\n"
			"	st	1,4(%2)\n"
#else /* CONFIG_64BIT */
			"	larl	1,1f\n"
			"	stg	1,8(%2)\n"
#endif /* CONFIG_64BIT */
			"	stpt	0(%4)\n"
			"	stck	0(%3)\n"
#ifndef CONFIG_64BIT
			"	lpsw	0(%2)\n"
#else /* CONFIG_64BIT */
			"	lpswe	0(%2)\n"
#endif /* CONFIG_64BIT */
			"1:"
			: "=m" (idle->idle_enter), "=m" (vq->idle)
			: "a" (&psw), "a" (&idle->idle_enter),
			  "a" (&vq->idle), "m" (psw)
			: "memory", "cc", "1");
	}
}
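
/*
 * idle->sequence works like a seqcount: vtime_start_cpu() makes it odd
 * before updating the idle statistics and even again afterwards, so the
 * lockless reader below retries while an update is in progress or when
 * the counter changed under it.
 */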
cputime64_t s390_get_idle_time(int cpu)
{
	struct s390_idle_data *idle;
	unsigned long long now, idle_time, idle_enter;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, cpu);

	now = get_clock();
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
	idle_time = 0;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time = now - idle_enter;
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
	return idle_time;
}

/*
 * Sorted add to a list. The list is searched linearly until the first
 * bigger element is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list)
{
	struct vtimer_queue *vq;
	struct vtimer_list *event, *tmp;

	if (list_empty(cb_list))
		return;

	vq = &__get_cpu_var(virt_cpu_timer);

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		list_del_init(&event->entry);
		(event->function)(event->data);
		if (event->interval) {
			/* Recharge interval timer */
			event->expires = event->interval + vq->elapsed;
			spin_lock(&vq->lock);
			list_add_sorted(event, &vq->list);
			spin_unlock(&vq->lock);
		}
	}
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(__u16 error_code)
{
	struct vtimer_queue *vq;
	struct vtimer_list *event, *tmp;
	struct list_head cb_list;	/* the callback queue */
	__u64 elapsed, next;

	INIT_LIST_HEAD(&cb_list);
	vq = &__get_cpu_var(virt_cpu_timer);

	/* walk timer list, fire all expired events */
	spin_lock(&vq->lock);

	elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
	BUG_ON((s64) elapsed < 0);
	vq->elapsed = 0;
	list_for_each_entry_safe(event, tmp, &vq->list, entry) {
		if (event->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&event->entry, &cb_list);
		else
			event->expires -= elapsed;
	}
	spin_unlock(&vq->lock);

	vq->do_spt = list_empty(&cb_list);
	do_callbacks(&cb_list);

	/* next event is first in list */
	next = VTIMER_MAX_SLICE;
	spin_lock(&vq->lock);
	if (!list_empty(&vq->list)) {
		event = list_first_entry(&vq->list, struct vtimer_list, entry);
		next = event->expires;
	} else
		vq->do_spt = 0;
	spin_unlock(&vq->lock);
	/*
	 * To improve precision add the time spent by the
	 * interrupt handler to the elapsed time.
	 * Note: CPU timer counts down and we got an interrupt,
	 * the current content is negative
	 */
	elapsed = S390_lowcore.async_enter_timer - get_vtimer();
	set_vtimer(next - elapsed);
	vq->timer = next - elapsed;
	vq->elapsed = elapsed;
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return (!list_empty(&timer->entry));
}

/*
 * this function should only run on the specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	struct vtimer_queue *vq;
	unsigned long flags;
	__u64 left, expires;

	vq = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vq->lock, flags);

	BUG_ON(timer->cpu != smp_processor_id());

	if (list_empty(&vq->list)) {
		/* First timer on this cpu, just program it. */
		list_add(&timer->entry, &vq->list);
		set_vtimer(timer->expires);
		vq->timer = timer->expires;
		vq->elapsed = 0;
	} else {
		/* Check progress of old timers. */
		expires = timer->expires;
		left = get_vtimer();
		if (likely((s64) expires < (s64) left)) {
			/* The new timer expires before the current timer. */
			set_vtimer(expires);
			vq->elapsed += vq->timer - left;
			vq->timer = expires;
		} else {
			vq->elapsed += vq->timer - left;
			vq->timer = left;
		}
		/* Insert new timer into per cpu list. */
		timer->expires += vq->elapsed;
		list_add_sorted(timer, &vq->list);
	}
	spin_unlock_irqrestore(&vq->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
	put_cpu();
}

static inline void prepare_vtimer(struct vtimer_list *timer)
{
	BUG_ON(!timer->function);
	BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
	BUG_ON(vtimer_pending(timer));
	timer->cpu = get_cpu();
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	prepare_vtimer(timer);
	timer->interval = 0;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	prepare_vtimer(timer);
	timer->interval = timer->expires;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
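
/*
 * Usage sketch (illustrative only, not code from this file; my_vtimer,
 * my_vtimer_fn and the expiry value are made-up names/numbers, and the
 * callback prototype is assumed to match what do_callbacks() invokes):
 *
 *	static struct vtimer_list my_vtimer;
 *
 *	static void my_vtimer_fn(unsigned long data)
 *	{
 *		... runs from the cpu timer interrupt once this cpu has
 *		    consumed the requested amount of virtual cpu time ...
 *	}
 *
 *	init_virt_timer(&my_vtimer);
 *	my_vtimer.function = my_vtimer_fn;
 *	my_vtimer.data = 0;
 *	my_vtimer.expires = expiry_in_cpu_timer_units;
 *	add_virt_timer(&my_vtimer);
 *
 * add_virt_timer() arms a one-shot timer; add_virt_timer_periodic() would
 * instead re-arm it every "expires" units. The expiry value is in CPU
 * timer units (the format programmed by SPT in set_vtimer()) and must not
 * be zero or exceed VTIMER_MAX_SLICE, or prepare_vtimer() hits a BUG_ON.
 */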

int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
{
	struct vtimer_queue *vq;
	unsigned long flags;
	int cpu;

	BUG_ON(!timer->function);
	BUG_ON(!expires || expires > VTIMER_MAX_SLICE);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;

	cpu = get_cpu();
	vq = &per_cpu(virt_cpu_timer, cpu);

	/* disable interrupts before test if timer is pending */
	spin_lock_irqsave(&vq->lock, flags);

	/* if timer isn't pending add it on the current CPU */
	if (!vtimer_pending(timer)) {
		spin_unlock_irqrestore(&vq->lock, flags);

		if (periodic)
			timer->interval = expires;
		else
			timer->interval = 0;
		timer->expires = expires;
		timer->cpu = cpu;
		internal_add_vtimer(timer);
		return 0;
	}

	/* check if we run on the right CPU */
	BUG_ON(timer->cpu != cpu);

	list_del_init(&timer->entry);
	timer->expires = expires;
	if (periodic)
		timer->interval = expires;

	/* the timer can't expire anymore so we can release the lock */
	spin_unlock_irqrestore(&vq->lock, flags);
	internal_add_vtimer(timer);
	return 1;
}

/*
 * If we change a pending timer, this function must be called on the CPU
 * the timer is running on.
 *
 * Returns whether it has modified a pending timer (1) or not (0).
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * If we change a pending timer, this function must be called on the CPU
 * the timer is running on.
 *
 * Returns whether it has modified a pending timer (1) or not (0).
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * Returns whether the deleted timer was pending (1) or not (0).
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;
	struct vtimer_queue *vq;

	/* check if timer is pending */
	if (!vtimer_pending(timer))
		return 0;

	vq = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vq->lock, flags);

	/* we don't interrupt a running timer, just let it expire! */
	list_del_init(&timer->entry);

	spin_unlock_irqrestore(&vq->lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);
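
/*
 * Continuing the sketch above (still illustrative, made-up names):
 *
 *	mod_virt_timer(&my_vtimer, new_expiry);
 *	mod_virt_timer_periodic(&my_vtimer, new_interval);
 *	...
 *	pending = del_virt_timer(&my_vtimer);
 *
 * Both mod_virt_timer variants add the timer on the current CPU and
 * return 0 if it was not already pending; del_virt_timer() returns 0
 * when the timer was not pending and 1 after unlinking a pending one.
 */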

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
	struct vtimer_queue *vq;

	/* initialize per cpu vtimer structure */
	vq = &__get_cpu_var(virt_cpu_timer);
	INIT_LIST_HEAD(&vq->list);
	spin_lock_init(&vq->lock);

	/* enable cpu timer interrupts */
	__ctl_set_bit(0, 10);
}

void __init vtime_init(void)
{
	/* request the cpu timer external interrupt */
	if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
		panic("Couldn't request external interrupt 0x1005");

	/* Enable cpu timer interrupts on the boot cpu. */
	init_cpu_vtimer();
}