#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>

#include <asm/timex.h>

static int notsc __initdata = 0;

unsigned int cpu_khz;		/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);
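
/*
 * Cycles -> nanoseconds conversion uses fixed point arithmetic:
 *
 *	ns = cycles * cyc2ns_scale >> NS_SCALE
 *
 * where cyc2ns_scale == (NSEC_PER_MSEC << NS_SCALE) / tsc_khz, so each
 * conversion costs one multiply and one shift instead of a division.
 * Worked example, assuming NS_SCALE == 10 (its value in asm/timex.h):
 * at 2 GHz, tsc_khz == 2000000 and cyc2ns_scale == (1000000 << 10) /
 * 2000000 == 512, hence ns == cycles * 512 >> 10 == cycles / 2,
 * i.e. 0.5 ns per cycle, as expected.
 */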
static unsigned int cyc2ns_scale __read_mostly;

void set_cyc2ns_scale(unsigned long khz)
{
	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> NS_SCALE;
}
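
/*
 * Scheduler clock: returns nanoseconds since the TSC was last reset
 * (roughly time since boot), based on the local TSC. Cheap to read,
 * but only approximately monotonic across CPUs; see the comment below.
 */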
unsigned long long sched_clock(void)
{
	unsigned long a = 0;

	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	 * which means it is not completely exact and may not be monotonic
	 * between CPUs. But the errors should be too small to matter for
	 * scheduling purposes.
	 */
	rdtscll(a);
	return cycles_2_ns(a);
}

static int tsc_unstable;

inline int check_tsc_unstable(void)
{
	return tsc_unstable;
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC-based timer when the CPU
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * frequency scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int  ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;
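
/*
 * On a transition, loops_per_jiffy and tsc_khz are rescaled in
 * proportion to the frequency change; cpufreq_scale(old, div, mult)
 * computes old * mult / div. Note the asymmetry below: lpj is rescaled
 * at PRECHANGE when the CPU speeds up but at POSTCHANGE when it slows
 * down, so udelay() sees the larger value for the whole transition and
 * delays can only err on the long side.
 */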
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz_ref);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif

/*
 * Make an educated guess whether the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif
	/* Most Intel systems have synchronized TSCs except for
	   multi-node systems */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
		/* But the TSC doesn't tick in C3, so don't use it there */
		if (acpi_gbl_FADT.header.length > 0 &&
		    acpi_gbl_FADT.C3latency < 1000)
			return 1;
#endif
		return 0;
	}

	/* Assume multi-socket systems are not synchronized */
	return num_present_cpus() > 1;
}
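
/*
 * Handle the "notsc" boot option: passing "notsc" on the kernel command
 * line keeps init_tsc_clocksource() below from registering the TSC as a
 * clocksource.
 */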
int __init notsc_setup(char *s)
{
	notsc = 1;
	return 1;
}

__setup("notsc", notsc_setup);

/* clock source code: */
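
/*
 * Both readers go through get_cycles_sync(), which pairs RDTSC with a
 * synchronizing instruction (RDTSCP, or CPUID+RDTSC on CPUs without it)
 * so the TSC read cannot be speculated past preceding code.
 */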
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}

static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.vread			= vread_tsc,
};
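
/*
 * The clocksource core converts cycles to nanoseconds as
 * ns = (cycles * mult) >> shift; mult is filled in by
 * clocksource_khz2mult(tsc_khz, shift) in init_tsc_clocksource() below.
 * Worked example, assuming a 2 GHz TSC (tsc_khz == 2000000): with
 * shift == 22, mult == (1000000 << 22) / 2000000 == 1 << 21, so
 * ns == cycles >> 1, i.e. 0.5 ns per cycle.
 */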
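
/*
 * Demote the TSC once it is known to be unreliable. Rating 0 puts the
 * clocksource in the "unfit for real use" band, so the core will only
 * fall back to it when nothing better is registered.
 */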
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to %s\n", reason);
		/* If already registered (mult set), go through
		   clocksource_change_rating(); otherwise just
		   poke the rating field directly */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

void __init init_tsc_clocksource(void)
{
	if (!notsc) {
		clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
							clocksource_tsc.shift);
		if (check_tsc_unstable())
			clocksource_tsc.rating = 0;

		clocksource_register(&clocksource_tsc);
	}
}