/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
# include <linux/clockchips.h>
# include <linux/interrupt.h>
# include <linux/percpu.h>
2009-06-19 14:05:26 +01:00
# include <linux/smp.h>
2010-10-07 14:08:54 +01:00
# include <linux/irq.h>
2007-10-18 17:48:11 +01:00
2007-10-19 07:55:48 +01:00
# include <asm/smtc_ipi.h>
2007-10-18 17:48:11 +01:00
# include <asm/time.h>
2008-09-09 21:48:52 +02:00
# include <asm/cevt-r4k.h>
/*
 * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
 * of these routines with SMTC-specific variants.
 */
# ifndef CONFIG_MIPS_MT_SMTC
2007-10-18 17:48:11 +01:00
static int mips_next_event ( unsigned long delta ,
struct clock_event_device * evt )
{
unsigned int cnt ;
int res ;
cnt = read_c0_count ( ) ;
cnt + = delta ;
write_c0_compare ( cnt ) ;
2007-10-23 21:51:19 +09:00
res = ( ( int ) ( read_c0_count ( ) - cnt ) > 0 ) ? - ETIME : 0 ;
2007-10-18 17:48:11 +01:00
return res ;
}
2008-09-09 21:48:52 +02:00
# endif /* CONFIG_MIPS_MT_SMTC */
/*
 * Mode-change callback for the clockevents core.  The R4k Count/Compare
 * timer needs no per-mode setup, so this is intentionally a no-op.
 */
void mips_set_clock_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
{
	/* Nothing to do ...  */
}
2008-09-09 21:48:52 +02:00
/* Per-CPU clock_event_device backing the R4k Count/Compare timer. */
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
/* Set once the shared compare irqaction has been installed (first CPU wins). */
int cp0_timer_irq_installed;
2007-10-18 17:48:11 +01:00
2008-09-09 21:48:52 +02:00
# ifndef CONFIG_MIPS_MT_SMTC
2007-10-18 17:48:11 +01:00
2008-09-09 21:48:52 +02:00
irqreturn_t c0_compare_interrupt ( int irq , void * dev_id )
2007-10-18 17:48:11 +01:00
{
const int r2 = cpu_has_mips_r2 ;
struct clock_event_device * cd ;
int cpu = smp_processor_id ( ) ;
/*
* Suckage alert :
* Before R2 of the architecture there was no way to see if a
* performance counter interrupt was pending , so we have to run
* the performance counter interrupt handler anyway .
*/
if ( handle_perf_irq ( r2 ) )
goto out ;
/*
* The same applies to performance counter interrupts . But with the
* above we now know that the reason we got here must be a timer
* interrupt . Being the paranoiacs we are we check anyway .
*/
if ( ! r2 | | ( read_c0_cause ( ) & ( 1 < < 30 ) ) ) {
2008-09-09 21:48:52 +02:00
/* Clear Count/Compare Interrupt */
write_c0_compare ( read_c0_compare ( ) ) ;
2007-10-18 17:48:11 +01:00
cd = & per_cpu ( mips_clockevent_device , cpu ) ;
cd - > event_handler ( cd ) ;
}
out :
return IRQ_HANDLED ;
}
2008-09-09 21:48:52 +02:00
# endif /* Not CONFIG_MIPS_MT_SMTC */
struct irqaction c0_compare_irqaction = {
2007-10-18 17:48:11 +01:00
. handler = c0_compare_interrupt ,
2009-10-08 21:17:54 +08:00
. flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER ,
2007-10-18 17:48:11 +01:00
. name = " timer " ,
} ;
2008-09-09 21:48:52 +02:00
/*
 * Placeholder event handler assigned before device registration;
 * intentionally does nothing.
 */
void mips_event_handler(struct clock_event_device *dev)
{
}
/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
/*
 * Return nonzero when the Count/Compare interrupt is pending, by
 * testing the per-platform compare IP bit in the Cause register.
 */
static int c0_compare_int_pending(void)
{
	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}
2008-09-09 21:48:52 +02:00
/*
* Compare interrupt can be routed and latched outside the core ,
* so a single execution hazard barrier may not be enough to give
* it time to clear as seen in the Cause register . 4 time the
* pipeline depth seems reasonably conservative , and empirically
* works better in configurations with high CPU / bus clock ratios .
*/
# define compare_change_hazard() \
do { \
irq_disable_hazard ( ) ; \
irq_disable_hazard ( ) ; \
irq_disable_hazard ( ) ; \
irq_disable_hazard ( ) ; \
} while ( 0 )
/*
 * Probe whether the Count/Compare interrupt actually works on this CPU:
 * ack any stale pending interrupt, arm Compare a short distance ahead
 * of Count, wait for expiry, and verify the interrupt both asserts and
 * can be cleared again.  Returns 1 if usable, 0 otherwise.
 * The exact write/hazard/read ordering here is load-bearing — do not
 * reorder statements.
 */
int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		/* Writing Compare acks the interrupt. */
		write_c0_compare(read_c0_count());
		compare_change_hazard();
		if (c0_compare_int_pending())
			return 0;
	}

	/* Find a delta large enough that Compare lands ahead of Count. */
	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		compare_change_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry  */

	compare_change_hazard();
	/* The interrupt must now be visible as pending ... */
	if (!c0_compare_int_pending())
		return 0;

	/* ... and acking it must make it go away again. */
	write_c0_compare(read_c0_count());
	compare_change_hazard();
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}
2008-09-09 21:48:52 +02:00
# ifndef CONFIG_MIPS_MT_SMTC
2008-12-21 09:26:22 +01:00
/*
 * Register the R4k Count/Compare timer as a per-CPU oneshot clockevent
 * device.  Returns 0 on success, -ENXIO when the counter is absent,
 * unclocked, or its interrupt is unusable.
 */
int __cpuinit r4k_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	/* A working counter with a known frequency is a hard requirement. */
	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of it's liking.
	 */
	irq = get_c0_compare_int ? get_c0_compare_int()
				 : MIPS_CPU_IRQ_BASE + cp0_compare_irq;

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	clockevent_set_clock(cd, mips_hpt_frequency);

	/* Calculate the min / max delta */
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	/* The irqaction is shared between all CPUs; install it only once. */
	if (!cp0_timer_irq_installed) {
		cp0_timer_irq_installed = 1;
		setup_irq(irq, &c0_compare_irqaction);
	}

	return 0;
}
2008-09-09 21:48:52 +02:00
# endif /* Not CONFIG_MIPS_MT_SMTC */