2005-08-17 21:44:08 +04:00
/*
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
2006-04-05 12:45:45 +04:00
 * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
 *    Elizabeth Clarke (beth@mips.com)
 *    Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
2005-08-17 21:44:08 +04:00
*/
# include <linux/kernel.h>
# include <linux/sched.h>
# include <linux/cpumask.h>
# include <linux/interrupt.h>
# include <linux/compiler.h>
2007-03-02 23:42:04 +03:00
# include <linux/smp.h>
2005-08-17 21:44:08 +04:00
# include <asm/atomic.h>
2006-04-05 12:45:45 +04:00
# include <asm/cacheflush.h>
2005-08-17 21:44:08 +04:00
# include <asm/cpu.h>
# include <asm/processor.h>
# include <asm/system.h>
# include <asm/hardirq.h>
# include <asm/mmu_context.h>
# include <asm/time.h>
# include <asm/mipsregs.h>
# include <asm/mipsmtregs.h>
2006-04-05 12:45:45 +04:00
# include <asm/mips_mt.h>
2005-08-17 21:44:08 +04:00
2008-04-28 20:14:26 +04:00
/*
 * Clone VPE0's CP0 configuration into the currently-selected VPE so a
 * secondary VPE starts from the same state as the boot VPE.
 */
static void __init smvp_copy_vpe_config(void)
{
	/* CU0 on; interrupts masked/disabled and kernel mode for the new VPE */
	write_vpe_c0_status(
		(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);

	/* set config to be the same as vpe0, particularly kseg0 coherency alg */
	write_vpe_c0_config(read_c0_config());

	/* make sure there are no software interrupts pending */
	write_vpe_c0_cause(0);

	/* Propagate Config7 */
	write_vpe_c0_config7(read_c0_config7());

	/* start the new VPE's counter from our current count */
	write_vpe_c0_count(read_c0_count());
}
2008-04-28 20:14:26 +04:00
/*
 * Per-TC VPE initialisation: deactivate every VPE except VPE0, record the
 * TC as a possible CPU, and clone VPE0's config into it.
 *
 * Returns the running count of discovered secondary CPUs.
 */
static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
	unsigned int ncpu)
{
	/* TCs beyond the number of provisioned VPEs carry no VPE context */
	if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
		return ncpu;

	/* Deactivate all but VPE 0 */
	if (tc != 0) {
		unsigned long vpeconf0 = read_vpe_c0_vpeconf0();

		vpeconf0 &= ~VPECONF0_VPA;

		/* master VPE */
		vpeconf0 |= VPECONF0_MVP;
		write_vpe_c0_vpeconf0(vpeconf0);

		/* Record this as available CPU */
		set_cpu_possible(tc, true);
		__cpu_number_map[tc] = ++ncpu;
		__cpu_logical_map[ncpu] = tc;
	}

	/* Disable multi-threading with TC's */
	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);

	if (tc != 0)
		smvp_copy_vpe_config();

	return ncpu;
}
2008-04-28 20:14:26 +04:00
/*
 * Per-TC setup: bind the TC to a VPE (excess TCs all land on the last VPE),
 * mark it unallocated/interrupt-exempt and halt it until boot time.
 */
static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
{
	unsigned long tcstatus;

	/* TC0 is already bound and running the boot CPU — leave it alone */
	if (!tc)
		return;

	/* bind a TC to each VPE, May as well put all excess TC's
	   on the last VPE */
	if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1))
		write_tc_c0_tcbind(read_tc_c0_tcbind() |
				   ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
	else {
		write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);

		/* and set XTC */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
				      (tc << VPECONF0_XTC_SHIFT));
	}

	tcstatus = read_tc_c0_tcstatus();

	/* mark not allocated and not dynamically allocatable */
	tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
	tcstatus |= TCSTATUS_IXMT;	/* interrupt exempt */
	write_tc_c0_tcstatus(tcstatus);

	/* park the TC until vsmp_boot_secondary() releases it */
	write_tc_c0_tchalt(TCHALT_H);
}
2007-11-19 15:23:51 +03:00
static void vsmp_send_ipi_single ( int cpu , unsigned int action )
2005-08-17 21:44:08 +04:00
{
2007-11-19 15:23:51 +03:00
int i ;
unsigned long flags ;
int vpflags ;
2005-08-17 21:44:08 +04:00
2007-11-19 15:23:51 +03:00
local_irq_save ( flags ) ;
2005-08-17 21:44:08 +04:00
2011-03-31 05:57:33 +04:00
vpflags = dvpe ( ) ; /* can't access the other CPU's registers whilst MVPE enabled */
2005-08-17 21:44:08 +04:00
2007-11-19 15:23:51 +03:00
switch ( action ) {
case SMP_CALL_FUNCTION :
i = C_SW1 ;
break ;
2005-08-17 21:44:08 +04:00
2007-11-19 15:23:51 +03:00
case SMP_RESCHEDULE_YOURSELF :
default :
i = C_SW0 ;
break ;
}
2005-08-17 21:44:08 +04:00
2007-11-19 15:23:51 +03:00
/* 1:1 mapping of vpe and tc... */
settc ( cpu ) ;
write_vpe_c0_cause ( read_vpe_c0_cause ( ) | i ) ;
evpe ( vpflags ) ;
2007-03-02 23:42:04 +03:00
2007-11-19 15:23:51 +03:00
local_irq_restore ( flags ) ;
}
2005-08-17 21:44:08 +04:00
2009-09-24 19:34:44 +04:00
static void vsmp_send_ipi_mask ( const struct cpumask * mask , unsigned int action )
2007-11-19 15:23:51 +03:00
{
unsigned int i ;
2005-08-17 21:44:08 +04:00
2009-09-24 19:34:44 +04:00
for_each_cpu ( i , mask )
2007-11-19 15:23:51 +03:00
vsmp_send_ipi_single ( i , action ) ;
}
2005-08-17 21:44:08 +04:00
2007-11-19 15:23:51 +03:00
/*
 * Early init on a freshly-booted secondary: unmask the interrupt lines it
 * needs.  Which lines depends on whether a GIC routes the interrupts.
 */
static void __cpuinit vsmp_init_secondary(void)
{
	extern int gic_present;

	/* This is Malta specific: IPI,performance and timer interrupts */
	if (gic_present)
		change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
					 STATUSF_IP6 | STATUSF_IP7);
	else
		change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
					 STATUSF_IP6 | STATUSF_IP7);
}
2005-08-17 21:44:08 +04:00
2007-11-19 15:23:51 +03:00
/*
 * Final per-CPU bring-up step: arm the count/compare timer, enroll in the
 * FPU-capable mask if applicable, then enable interrupts.
 */
static void __cpuinit vsmp_smp_finish(void)
{
	/* CDFIXME: remove this? */
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}
2005-08-17 21:44:08 +04:00
2007-11-19 15:23:51 +03:00
/* Hook called once all CPUs are up; nothing to do for VSMP. */
static void vsmp_cpus_done(void)
{
}
/*
* Setup the PC , SP , and GP of a secondary processor and start it
* running !
* smp_bootstrap is the place to resume from
* __KSTK_TOS ( idle ) is apparently the stack pointer
* ( unsigned long ) idle - > thread_info the gp
* assumes a 1 : 1 mapping of TC = > VPE
*/
2007-11-19 15:23:51 +03:00
static void __cpuinit vsmp_boot_secondary ( int cpu , struct task_struct * idle )
2005-08-17 21:44:08 +04:00
{
2006-01-12 12:06:07 +03:00
struct thread_info * gp = task_thread_info ( idle ) ;
2005-08-17 21:44:08 +04:00
dvpe ( ) ;
set_c0_mvpcontrol ( MVPCONTROL_VPC ) ;
settc ( cpu ) ;
/* restart */
write_tc_c0_tcrestart ( ( unsigned long ) & smp_bootstrap ) ;
/* enable the tc this vpe/cpu will be running */
write_tc_c0_tcstatus ( ( read_tc_c0_tcstatus ( ) & ~ TCSTATUS_IXMT ) | TCSTATUS_A ) ;
write_tc_c0_tchalt ( 0 ) ;
/* enable the VPE */
write_vpe_c0_vpeconf0 ( read_vpe_c0_vpeconf0 ( ) | VPECONF0_VPA ) ;
/* stack pointer */
write_tc_gpr_sp ( __KSTK_TOS ( idle ) ) ;
/* global pointer */
2006-01-12 12:06:07 +03:00
write_tc_gpr_gp ( ( unsigned long ) gp ) ;
2005-08-17 21:44:08 +04:00
2006-04-05 12:45:45 +04:00
flush_icache_range ( ( unsigned long ) gp ,
( unsigned long ) ( gp + sizeof ( struct thread_info ) ) ) ;
2005-08-17 21:44:08 +04:00
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol ( MVPCONTROL_VPC ) ;
evpe ( EVPE_ENABLE ) ;
}
2007-11-19 15:23:51 +03:00
/*
 * Common setup before any secondaries are started.
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 */
static void __init vsmp_smp_setup(void)
{
	unsigned int mvpconf0, ntc, tc, ncpu = 0;
	unsigned int nvpe;

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
	if (!cpu_has_mipsmt)
		return;

	/* disable MT so we can configure */
	dvpe();
	dmt();

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	mvpconf0 = read_c0_mvpconf0();
	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;

	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	smp_num_siblings = nvpe;

	/* we'll always have more TC's than VPE's, so loop setting everything
	   to a sensible state */
	for (tc = 0; tc <= ntc; tc++) {
		settc(tc);

		smvp_tc_init(tc, mvpconf0);
		ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
	}

	/* Release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	/* We'll wait until starting the secondaries before starting MVPE */

	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
}
/* Pre-boot preparation: apply MT CPU option settings before secondaries start. */
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
	mips_mt_set_cpuoptions();
}
2007-11-19 15:23:51 +03:00
/* Platform SMP operations for MIPS MT virtual SMP (one CPU per VPE). */
struct plat_smp_ops vsmp_smp_ops = {
	.send_ipi_single	= vsmp_send_ipi_single,
	.send_ipi_mask		= vsmp_send_ipi_mask,
	.init_secondary		= vsmp_init_secondary,
	.smp_finish		= vsmp_smp_finish,
	.cpus_done		= vsmp_cpus_done,
	.boot_secondary		= vsmp_boot_secondary,
	.smp_setup		= vsmp_smp_setup,
	.prepare_cpus		= vsmp_prepare_cpus,
};