/*
 * Malta Platform-specific hooks for SMP operation
 */
2007-08-06 16:32:20 +01:00
# include <linux/irq.h>
2007-03-04 18:27:34 +00:00
# include <linux/init.h>
2006-04-05 09:45:45 +01:00
2007-03-04 18:27:34 +00:00
# include <asm/mipsregs.h>
# include <asm/mipsmtregs.h>
# include <asm/smtc.h>
2006-04-05 09:45:45 +01:00
# include <asm/smtc_ipi.h>
/* VPE/SMP Prototype implements platform interfaces directly */
/*
* Cause the specified action to be performed on a targeted " CPU "
*/
2007-11-19 12:23:51 +00:00
static void msmtc_send_ipi_single ( int cpu , unsigned int action )
2006-04-05 09:45:45 +01:00
{
2007-03-04 18:27:34 +00:00
/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
2006-04-05 09:45:45 +01:00
smtc_send_ipi ( cpu , LINUX_SMP_IPI , action ) ;
}
2009-09-24 09:34:44 -06:00
static void msmtc_send_ipi_mask ( const struct cpumask * mask , unsigned int action )
2006-04-05 09:45:45 +01:00
{
2007-11-19 12:23:51 +00:00
unsigned int i ;
2009-09-24 09:34:44 -06:00
for_each_cpu ( i , mask )
2007-11-19 12:23:51 +00:00
msmtc_send_ipi_single ( i , action ) ;
2006-04-05 09:45:45 +01:00
}
/*
* Post - config but pre - boot cleanup entry point
*/
2007-11-19 12:23:51 +00:00
static void __cpuinit msmtc_init_secondary ( void )
2006-04-05 09:45:45 +01:00
{
int myvpe ;
/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
myvpe = read_c0_tcbind ( ) & TCBIND_CURVPE ;
if ( myvpe ! = 0 ) {
/* Ideally, this should be done only once per VPE, but... */
2007-08-01 19:42:37 +01:00
clear_c0_status ( ST0_IM ) ;
set_c0_status ( ( 0x100 < < cp0_compare_irq )
| ( 0x100 < < MIPS_CPU_IPI_IRQ ) ) ;
if ( cp0_perfcount_irq > = 0 )
set_c0_status ( 0x100 < < cp0_perfcount_irq ) ;
2006-04-05 09:45:45 +01:00
}
2007-11-19 12:23:51 +00:00
smtc_init_secondary ( ) ;
2006-04-05 09:45:45 +01:00
}
/*
2007-11-19 12:23:51 +00:00
* Platform " CPU " startup hook
2006-04-05 09:45:45 +01:00
*/
2007-11-19 12:23:51 +00:00
static void __cpuinit msmtc_boot_secondary ( int cpu , struct task_struct * idle )
2006-04-05 09:45:45 +01:00
{
2007-11-19 12:23:51 +00:00
smtc_boot_secondary ( cpu , idle ) ;
2006-04-05 09:45:45 +01:00
}
2007-11-19 12:23:51 +00:00
/*
* SMP initialization finalization entry point
*/
static void __cpuinit msmtc_smp_finish ( void )
2006-04-05 09:45:45 +01:00
{
2007-11-19 12:23:51 +00:00
smtc_smp_finish ( ) ;
2006-04-05 09:45:45 +01:00
}
/*
 * Hook for after all CPUs are online — nothing to do on this platform,
 * but the plat_smp_ops slot must be filled.
 */
static void msmtc_cpus_done(void)
{
}
/*
2007-11-19 12:23:51 +00:00
* Platform SMP pre - initialization
*
* As noted above , we can assume a single CPU for now
* but it may be multithreaded .
2006-04-05 09:45:45 +01:00
*/
2007-11-19 12:23:51 +00:00
static void __init msmtc_smp_setup ( void )
2006-04-05 09:45:45 +01:00
{
2008-09-09 21:48:52 +02:00
/*
* we won ' t get the definitive value until
* we ' ve run smtc_prepare_cpus later , but
* we would appear to need an upper bound now .
*/
smp_num_siblings = smtc_build_cpu_map ( 0 ) ;
2006-04-05 09:45:45 +01:00
}
2007-08-03 19:38:03 +02:00
2007-11-19 12:23:51 +00:00
static void __init msmtc_prepare_cpus ( unsigned int max_cpus )
{
2008-09-09 21:48:52 +02:00
smtc_prepare_cpus ( max_cpus ) ;
2007-11-19 12:23:51 +00:00
}
/* Platform SMP operations vector registered with the MIPS SMP core */
struct plat_smp_ops msmtc_smp_ops = {
	.send_ipi_single	= msmtc_send_ipi_single,
	.send_ipi_mask		= msmtc_send_ipi_mask,
	.init_secondary		= msmtc_init_secondary,
	.smp_finish		= msmtc_smp_finish,
	.cpus_done		= msmtc_cpus_done,
	.boot_secondary		= msmtc_boot_secondary,
	.smp_setup		= msmtc_smp_setup,
	.prepare_cpus		= msmtc_prepare_cpus,
};
2007-08-03 19:38:03 +02:00
# ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
* IRQ affinity hook
*/
2011-03-23 21:08:57 +00:00
int plat_set_irq_affinity ( struct irq_data * d , const struct cpumask * affinity ,
bool force )
2007-08-03 19:38:03 +02:00
{
2009-01-12 15:27:13 -08:00
cpumask_t tmask ;
2007-08-03 19:38:03 +02:00
int cpu = 0 ;
void smtc_set_irq_affinity ( unsigned int irq , cpumask_t aff ) ;
/*
* On the legacy Malta development board , all I / O interrupts
* are routed through the 8259 and combined in a single signal
* to the CPU daughterboard , and on the CoreFPGA2 / 3 34 K models ,
* that signal is brought to IP2 of both VPEs . To avoid racing
* concurrent interrupt service events , IP2 is enabled only on
* one VPE , by convention VPE0 . So long as no bits are ever
* cleared in the affinity mask , there will never be any
* interrupt forwarding . But as soon as a program or operator
* sets affinity for one of the related IRQs , we need to make
2011-03-30 22:57:33 -03:00
* sure that we don ' t ever try to forward across the VPE boundary ,
2007-08-03 19:38:03 +02:00
* at least not until we engineer a system where the interrupt
* _ack ( ) or _end ( ) function can somehow know that it corresponds
* to an interrupt taken on another VPE , and perform the appropriate
* restoration of Status . IM state using MFTR / MTTR instead of the
* normal local behavior . We also ensure that no attempt will
* be made to forward to an offline " CPU " .
*/
2009-01-12 15:27:13 -08:00
cpumask_copy ( & tmask , affinity ) ;
2008-12-13 21:20:26 +10:30
for_each_cpu ( cpu , affinity ) {
2007-08-03 19:38:03 +02:00
if ( ( cpu_data [ cpu ] . vpe_id ! = 0 ) | | ! cpu_online ( cpu ) )
cpu_clear ( cpu , tmask ) ;
}
2011-03-23 21:08:57 +00:00
cpumask_copy ( d - > affinity , & tmask ) ;
2007-08-03 19:38:03 +02:00
if ( cpus_empty ( tmask ) )
/*
* We could restore a default mask here , but the
* runtime code can anyway deal with the null set
*/
printk ( KERN_WARNING
2011-05-28 00:57:13 +01:00
" IRQ affinity leaves no legal CPU for IRQ %d \n " , d - > irq ) ;
2007-08-03 19:38:03 +02:00
/* Do any generic SMTC IRQ affinity setup */
2011-03-23 21:08:57 +00:00
smtc_set_irq_affinity ( d - > irq , tmask ) ;
2009-04-27 17:59:21 -07:00
2011-03-23 21:08:57 +00:00
return IRQ_SET_MASK_OK_NOCOPY ;
2007-08-03 19:38:03 +02:00
}
# endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */