/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>

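/*
 * Program the next event: Compare is set to Count + delta so the timer
 * interrupt fires once Count reaches that value.  If Count has already
 * passed the new Compare value by the time it is re-read, the event was
 * missed and -ETIME is returned so the clockevents core can retry.
 */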
static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

#ifdef CONFIG_MIPS_MT_SMTC
	{
	unsigned long flags, vpflags;
	local_irq_save(flags);
	vpflags = dvpe();
#endif
	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
#ifdef CONFIG_MIPS_MT_SMTC
	evpe(vpflags);
	local_irq_restore(flags);
	}
#endif
	return res;
}

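/*
 * Only CLOCK_EVT_FEAT_ONESHOT is advertised for this clockevent and the
 * count/compare pair needs no mode-specific programming, so set_mode is
 * deliberately a no-op.
 */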
static void mips_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt)
{
	/* Nothing to do ...  */
}

static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
static int cp0_timer_irq_installed;

/*
 * Timer ack for an R4k-compatible timer of a known frequency.
 */
static void c0_timer_ack(void)
{
	write_c0_compare(read_c0_compare());
}

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked
 */
static inline int handle_perf_irq(int r2)
{
	/*
	 * The performance counter overflow interrupt may be shared with the
	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
	 * and we can't reliably determine if a counter interrupt has also
	 * happened (!r2) then don't check for a timer interrupt.
	 */
	return (cp0_perfcount_irq < 0) &&
		perf_irq() == IRQ_HANDLED &&
		!r2;
}

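/*
 * Compare interrupt handler.  On pre-R2 cores a pending performance
 * counter interrupt cannot be detected, so handle_perf_irq() may consume
 * the interrupt entirely.  On R2 cores Cause.TI (bit 30) confirms that
 * the timer really fired; if so the timer is acked and the registered
 * clockevent handler is called.  Under SMTC only VPE0's device is used.
 */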
static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {
		c0_timer_ack();
#ifdef CONFIG_MIPS_MT_SMTC
		if (cpu_data[cpu].vpe_id)
			goto out;
		cpu = 0;
#endif
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}

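/*
 * The compare interrupt is a per-CPU interrupt on SMP, hence IRQF_PERCPU
 * in the non-SMTC case; the irqaction itself is installed only once,
 * guarded by cp0_timer_irq_installed in mips_clockevent_init().
 */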
static struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
#ifdef CONFIG_MIPS_MT_SMTC
	.flags = IRQF_DISABLED,
#else
	.flags = IRQF_DISABLED | IRQF_PERCPU,
#endif
	.name = "timer",
};

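/*
 * SMTC: secondary VPEs get a dummy clockevent device and receive their
 * ticks as SMTC_CLOCK_TICK IPIs broadcast by mips_broadcast() from VPE0,
 * which owns the real count/compare clockevent.
 */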
#ifdef CONFIG_MIPS_MT_SMTC
DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);

static void smtc_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt)
{
}

static void mips_broadcast(cpumask_t mask)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
}

static void setup_smtc_dummy_clockevent_device(void)
{
	//uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;

	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);

	cd->name = "SMTC";
	cd->features = CLOCK_EVT_FEAT_DUMMY;

	/* Calculate the min / max delta */
	cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift = 0; //32;
	cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);

	cd->rating = 200;
	cd->irq = 17; //-1;
//	if (cpu)
//		cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
//	else
	cd->cpumask = cpumask_of_cpu(cpu);

	cd->set_mode = smtc_set_mode;
	cd->broadcast = mips_broadcast;

	clockevents_register_device(cd);
}
#endif

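/*
 * mips_event_handler() is only a safe placeholder; the generic
 * clockevents/tick code replaces ->event_handler once it takes the
 * device over.
 */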
static void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
}

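/*
 * Probe whether the count/compare interrupt is usable: clear any stale
 * pending state, program Compare with progressively larger deltas until
 * one lands in the future, busy-wait for Count to pass it, then verify
 * that the interrupt both asserts and can be cleared again by writing
 * Compare.  c0_compare_int_pending() above isolates IP[cp0_compare_irq]
 * from the Cause register (the IP field starts at bit 8, hence the
 * 0x100 mask after the shift).
 */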
static int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
		irq_disable_hazard();
		if (c0_compare_int_pending())
			return 0;
	}

	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		irq_disable_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry  */

	if (!c0_compare_int_pending())
		return 0;

	write_c0_compare(read_c0_count());
	irq_disable_hazard();
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}

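/*
 * The clockevent is scaled so that a nanosecond delta converts to Count
 * cycles as cycles = (ns * mult) >> shift, with
 * mult = div_sc(freq, NSEC_PER_SEC, 32) = (freq << 32) / NSEC_PER_SEC.
 * As an illustrative example (hypothetical 100 MHz Count rate):
 * mult ~= 429496729, so a 1,000,000 ns event works out to roughly
 * (1000000 * 429496729) >> 32 ~= 100000 cycles.
 */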
int __cpuinit mips_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

#ifdef CONFIG_MIPS_MT_SMTC
	setup_smtc_dummy_clockevent_device();

	/*
	 * On SMTC we only register VPE0's compare interrupt as clockevent
	 * device.
	 */
	if (cpu)
		return 0;
#endif

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name = "MIPS";
	cd->features = CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift = 32;
	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);

	cd->rating = 300;
	cd->irq = irq;
#ifdef CONFIG_MIPS_MT_SMTC
	cd->cpumask = CPU_MASK_ALL;
#else
	cd->cpumask = cpumask_of_cpu(cpu);
#endif
	cd->set_next_event = mips_next_event;
	cd->set_mode = mips_set_mode;
	cd->event_handler = mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

#ifdef CONFIG_MIPS_MT_SMTC
#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
#else
	setup_irq(irq, &c0_compare_irqaction);
#endif

	return 0;
}