/*
 * "High Precision Event Timer" based timekeeping.
 *
 * Copyright (c) 1991, 1992, 1995 Linus Torvalds
 * Copyright (c) 1994 Alan Modra
 * Copyright (c) 1995 Markus Kuhn
 * Copyright (c) 1996 Ingo Molnar
 * Copyright (c) 1998 Andrea Arcangeli
 * Copyright (c) 2002, 2006 Vojtech Pavlik
 * Copyright (c) 2003 Andi Kleen
 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */
2008-01-30 15:30:27 +03:00
# include <linux/clockchips.h>
2005-04-17 02:20:36 +04:00
# include <linux/init.h>
2008-01-30 15:30:27 +03:00
# include <linux/interrupt.h>
2005-04-17 02:20:36 +04:00
# include <linux/module.h>
2008-01-30 15:30:27 +03:00
# include <linux/time.h>
2007-10-13 01:04:07 +04:00
2007-07-21 19:11:18 +04:00
# include <asm/i8253.h>
2007-02-16 12:28:19 +03:00
# include <asm/hpet.h>
2007-05-02 21:27:06 +04:00
# include <asm/nmi.h>
2007-07-21 19:10:01 +04:00
# include <asm/vgtod.h>
2008-01-30 15:31:10 +03:00
# include <asm/time.h>
# include <asm/timer.h>
2005-04-17 02:20:36 +04:00
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES ;
/*
 * profile_pc - return the program counter to attribute a profiling hit to.
 *
 * If the sample landed inside one of the locking primitives, credit the
 * caller instead, so profiles blame the contended call site rather than
 * the lock implementation itself.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or a copy
	   of flags from PUSHF
	   Eflags always has bits 22 and up cleared unlike kernel addresses. */
	if (!user_mode(regs) && in_lock_functions(pc)) {
		unsigned long *sp = (unsigned long *)regs->sp;
		/*
		 * Peek at the top two stack slots: whichever one has any of
		 * bits 22+ set cannot be a saved EFLAGS copy and is taken to
		 * be the caller's return address (kernel addresses always
		 * have high bits set).
		 */
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);
2007-10-13 01:04:07 +04:00
/*
 * IRQ0 (timer) interrupt handler: bump the per-CPU interrupt statistic,
 * then dispatch to whichever global clock event device is installed
 * (HPET or PIT, chosen in hpet_time_init()).
 */
static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
{
	add_pda(irq0_irqs, 1);

	global_clock_event->event_handler(global_clock_event);

	return IRQ_HANDLED;
}
2007-05-02 21:27:06 +04:00
/* calibrate_cpu is used on systems with fixed rate TSCs to determine
 * processor frequency */
#define TICK_COUNT 100000000

/*
 * native_calculate_cpu_khz - measure the core clock in kHz on AMD CPUs
 * with a constant-rate TSC.
 *
 * Counts actual CPU cycles with a K7 performance counter (event 0x76 —
 * presumably "CPU clocks not halted"; verify against the BIOS and Kernel
 * Developer's Guide) over a window of TICK_COUNT TSC ticks, then scales
 * tsc_khz by the cycles/TSC ratio.
 *
 * Returns the computed CPU frequency in kHz.
 */
unsigned long __init native_calculate_cpu_khz(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

	/* Find a performance counter not claimed by the NMI watchdog. */
	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		/*
		 * All counters busy: borrow counter 3, saving its event
		 * select and count so they can be restored afterwards.
		 */
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}

	/* Interrupts off so nothing perturbs the measurement window. */
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
	/*
	 * Program event 0x76 with OS+USR counting (3 << 16) and the
	 * counter-enable bit (1 << 22) — NOTE(review): bit meanings per
	 * the K7/K8 EVNTSEL layout; confirm against AMD docs.
	 */
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles();
	} while ((tsc_now - tsc_start) < TICK_COUNT);
	local_irq_restore(flags);

	/* Restore or release the counter we used. */
	if (no_ctr_free) {
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}

	/* cycles counted, scaled by the known TSC rate over the window. */
	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}
2005-04-17 02:20:36 +04:00
/*
 * irqaction for IRQ0: IRQF_DISABLED keeps other interrupts off while the
 * tick runs, IRQF_IRQPOLL marks it usable for irq polling, and
 * IRQF_NOBALANCING pins it (the timer must not be migrated between CPUs).
 */
static struct irqaction irq0 = {
	.handler = timer_event_interrupt,
	.flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
	.mask = CPU_MASK_NONE,
	.name = "timer"
};
2008-01-30 15:31:10 +03:00
/*
 * hpet_time_init - set up the boot clock event device and wire up IRQ0.
 *
 * Prefers the HPET; falls back to the legacy PIT when the HPET cannot be
 * enabled. The clock event device must be installed before the timer
 * interrupt is registered, since the handler dispatches through it.
 */
void __init hpet_time_init(void)
{
	if (!hpet_enable())
		setup_pit_timer();

	setup_irq(0, &irq0);
}
2005-04-17 02:20:36 +04:00
2008-01-30 15:31:10 +03:00
/*
 * time_init - boot-time timekeeping initialization.
 *
 * Calibrates the TSC, refines cpu_khz via the performance-counter method
 * on AMD CPUs with a constant-rate TSC, configures the vsyscall
 * vgetcpu mode, registers the TSC clocksource, and defers the clock event
 * setup (choose_time_init()) to late_time_init.
 */
void __init time_init(void)
{
	tsc_calibrate();

	cpu_khz = tsc_khz;
	/*
	 * On AMD parts with an invariant TSC the TSC rate need not equal
	 * the core clock, so measure the core clock directly.
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
		(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
		cpu_khz = calculate_cpu_khz();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	/* Pick the vsyscall getcpu implementation: RDTSCP if available,
	 * otherwise derive the CPU number from the LSL-readable GDT entry. */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;

	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
		cpu_khz / 1000, cpu_khz % 1000);
	init_tsc_clocksource();
	late_time_init = choose_time_init();
}