/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
 *
 * SMP support for BMIPS
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/bug.h>
#include <linux/kernel.h>

#include <asm/time.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/bootinfo.h>
#include <asm/pmon.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mipsregs.h>
#include <asm/bmips.h>
#include <asm/traps.h>
#include <asm/barrier.h>
static int __maybe_unused max_cpus = 1;

/* these may be configured by the platform code */
int bmips_smp_enabled = 1;
int bmips_cpu_offset;
cpumask_t bmips_booted_mask;

#ifdef CONFIG_SMP

/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
unsigned long bmips_smp_boot_sp;
unsigned long bmips_smp_boot_gp;
static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
static void bmips5000_send_ipi_single(int cpu, unsigned int action);
static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id);
static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id);
/* SW interrupts 0,1 are used for interprocessor signaling */
#define IPI0_IRQ			(MIPS_CPU_IRQ_BASE + 0)
#define IPI1_IRQ			(MIPS_CPU_IRQ_BASE + 1)
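
/*
 * The macros below build commands for write_c0_brcm_action() on BMIPS5000:
 * CPUNUM() inserts the target thread number (plus any platform offset) at
 * the given bit position, ACTION_SET_IPI()/ACTION_CLR_IPI() raise or clear
 * one of the two inbound SW IRQs selected by bit 8, and ACTION_BOOT_THREAD()
 * starts a secondary thread (see bmips_boot_secondary()).  Refer to the
 * BMIPS5000 documentation for the authoritative register layout.
 */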
#define CPUNUM(cpu, shift)		(((cpu) + bmips_cpu_offset) << (shift))
#define ACTION_CLR_IPI(cpu, ipi)	(0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_SET_IPI(cpu, ipi)	(0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_BOOT_THREAD(cpu)		(0x08 | CPUNUM(cpu, 0))
static void __init bmips_smp_setup(void)
{
	int i, cpu = 1, boot_cpu = 0;
	int cpu_hw_intr;

	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		/* arbitration priority */
		clear_c0_brcm_cmt_ctrl(0x30);

		/* NBK and weak order flags */
		set_c0_brcm_config_0(0x30000);

		/* Find out if we are running on TP0 or TP1 */
		boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));

		/*
		 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other
		 * thread
		 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
		 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
		 */
		if (boot_cpu == 0)
			cpu_hw_intr = 0x02;
		else
			cpu_hw_intr = 0x1d;

		change_c0_brcm_cmt_intr(0xf8018000,
					(cpu_hw_intr << 27) | (0x03 << 15));

		/* single core, 2 threads (2 pipelines) */
		max_cpus = 2;

		break;
	case CPU_BMIPS5000:
		/* enable raceless SW interrupts */
		set_c0_brcm_config(0x03 << 22);

		/* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
		change_c0_brcm_mode(0x1f << 27, 0x02 << 27);

		/* N cores, 2 threads per core */
		max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;

		/* clear any pending SW interrupts */
		for (i = 0; i < max_cpus; i++) {
			write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
			write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
		}

		break;
	default:
		max_cpus = 1;
	}

	if (!bmips_smp_enabled)
		max_cpus = 1;

	/* this can be overridden by the BSP */
	if (!board_ebase_setup)
		board_ebase_setup = &bmips_ebase_setup;
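
	/*
	 * Map physical CPUs to logical CPU numbers.  On BMIPS43xx the kernel
	 * may have been entered on TP1, so whichever thread we booted on
	 * becomes logical CPU 0 and the remaining threads are numbered after
	 * it.
	 */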
	__cpu_number_map[boot_cpu] = 0;
	__cpu_logical_map[0] = boot_cpu;

	for (i = 0; i < max_cpus; i++) {
		if (i != boot_cpu) {
			__cpu_number_map[i] = cpu;
			__cpu_logical_map[cpu] = i;
			cpu++;
		}
		set_cpu_possible(i, 1);
		set_cpu_present(i, 1);
	}
}
/*
 * IPI IRQ setup - runs on CPU0
 */
static void bmips_prepare_cpus(unsigned int max_cpus)
{
	irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id);

	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		bmips_ipi_interrupt = bmips43xx_ipi_interrupt;
		break;
	case CPU_BMIPS5000:
		bmips_ipi_interrupt = bmips5000_ipi_interrupt;
		break;
	default:
		return;
	}

	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
			"smp_ipi0", NULL))
		panic("Can't request IPI0 interrupt");
	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
			"smp_ipi1", NULL))
		panic("Can't request IPI1 interrupt");
}
/*
 * Tell the hardware to boot CPUx - runs on CPU0
 */
static void bmips_boot_secondary(int cpu, struct task_struct *idle)
{
	bmips_smp_boot_sp = __KSTK_TOS(idle);
	bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
	mb();

	/*
	 * Initial boot sequence for secondary CPU:
	 *   bmips_reset_nmi_vec @ a000_0000 ->
	 *   bmips_smp_entry ->
	 *   plat_wired_tlb_setup (cached function call; optional) ->
	 *   start_secondary (cached jump)
	 *
	 * Warm restart sequence:
	 *   play_dead WAIT loop ->
	 *   bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC ->
	 *   eret to play_dead ->
	 *   bmips_secondary_reentry ->
	 *   start_secondary
	 */

	pr_info("SMP: Booting CPU%d...\n", cpu);

	if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
		switch (current_cpu_type()) {
		case CPU_BMIPS4350:
		case CPU_BMIPS4380:
			bmips43xx_send_ipi_single(cpu, 0);
			break;
		case CPU_BMIPS5000:
			bmips5000_send_ipi_single(cpu, 0);
			break;
		}
	} else {
		switch (current_cpu_type()) {
		case CPU_BMIPS4350:
		case CPU_BMIPS4380:
			/* Reset slave TP1 if booting from TP0 */
			if (cpu_logical_map(cpu) == 1)
				set_c0_brcm_cmt_ctrl(0x01);
			break;
		case CPU_BMIPS5000:
			write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
			break;
		}
		cpumask_set_cpu(cpu, &bmips_booted_mask);
	}
}
/*
 * Early setup - runs on secondary CPU after cache probe
 */
static void bmips_init_secondary(void)
{
	/* move NMI vector to kseg0, in case XKS01 is enabled */

	void __iomem *cbr;
	unsigned long old_vec;
	unsigned long relo_vector;
	int boot_cpu;

	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		cbr = BMIPS_GET_CBR();

		boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
		relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
					 BMIPS_RELO_VECTOR_CONTROL_1;

		old_vec = __raw_readl(cbr + relo_vector);
		__raw_writel(old_vec & ~0x20000000, cbr + relo_vector);

		clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
		break;
	case CPU_BMIPS5000:
		write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
			(smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));

		write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
		break;
	}
}
/*
 * Late setup - runs on secondary CPU before entering the idle loop
 */
static void bmips_smp_finish(void)
{
	pr_info("SMP: CPU%d is running\n", smp_processor_id());

	/* make sure there won't be a timer interrupt for a little while */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);

	irq_enable_hazard();
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
	irq_enable_hazard();
}
/*
 * BMIPS5000 raceless IPIs
 *
 * Each CPU has two inbound SW IRQs which are independent of all other CPUs.
 * IPI0 is used for SMP_RESCHEDULE_YOURSELF
 * IPI1 is used for SMP_CALL_FUNCTION
 */
static void bmips5000_send_ipi_single(int cpu, unsigned int action)
{
	write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
}

static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
{
	int action = irq - IPI0_IRQ;

	write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action));

	if (action == 0)
		scheduler_ipi();
	else
		smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static void bmips5000_send_ipi_mask(const struct cpumask *mask,
	unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		bmips5000_send_ipi_single(i, action);
}
/*
 * BMIPS43xx racey IPIs
 *
 * We use one inbound SW IRQ for each CPU.
 *
 * A spinlock must be held in order to keep CPUx from accidentally clearing
 * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy.  The
 * same spinlock is used to protect the action masks.
 */
static DEFINE_SPINLOCK(ipi_lock);
static DEFINE_PER_CPU(int, ipi_action_mask);

static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
{
	unsigned long flags;

	spin_lock_irqsave(&ipi_lock, flags);
	set_c0_cause(cpu ? C_SW1 : C_SW0);
	per_cpu(ipi_action_mask, cpu) |= action;
	irq_enable_hazard();
	spin_unlock_irqrestore(&ipi_lock, flags);
}

static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
{
	unsigned long flags;
	int action, cpu = irq - IPI0_IRQ;

	spin_lock_irqsave(&ipi_lock, flags);
	action = __this_cpu_read(ipi_action_mask);
	per_cpu(ipi_action_mask, cpu) = 0;
	clear_c0_cause(cpu ? C_SW1 : C_SW0);
	spin_unlock_irqrestore(&ipi_lock, flags);

	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();
	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();

	return IRQ_HANDLED;
}
static void bmips43xx_send_ipi_mask(const struct cpumask *mask,
	unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		bmips43xx_send_ipi_single(i, action);
}
#ifdef CONFIG_HOTPLUG_CPU

static int bmips_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	pr_info("SMP: CPU%d is offline\n", cpu);

	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);

	local_flush_tlb_all();
	local_flush_icache_range(0, ~0);

	return 0;
}

static void bmips_cpu_die(unsigned int cpu)
{
}
void __ref play_dead(void)
{
	idle_task_exit();

	/* flush data cache */
	_dma_cache_wback_inv(0, ~0);

	/*
	 * Wakeup is on SW0 or SW1; disable everything else
	 * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
	 * IRQ handlers; this clears ST0_IE and returns immediately.
	 */
	clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
	change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
		IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
	irq_disable_hazard();

	/*
	 * wait for SW interrupt from bmips_boot_secondary(), then jump
	 * back to start_secondary()
	 */
	__asm__ __volatile__(
	"	wait\n"
	"	j	bmips_secondary_reentry\n"
	: : : "memory");
}

#endif /* CONFIG_HOTPLUG_CPU */
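
/*
 * SMP operation tables.  Platform/BSP code is expected to select the table
 * matching its CPU (e.g. via register_smp_ops()) before bringing up
 * secondary CPUs; the exact registration point is platform specific.
 */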
struct plat_smp_ops bmips43xx_smp_ops = {
	.smp_setup		= bmips_smp_setup,
	.prepare_cpus		= bmips_prepare_cpus,
	.boot_secondary		= bmips_boot_secondary,
	.smp_finish		= bmips_smp_finish,
	.init_secondary		= bmips_init_secondary,
	.send_ipi_single	= bmips43xx_send_ipi_single,
	.send_ipi_mask		= bmips43xx_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= bmips_cpu_disable,
	.cpu_die		= bmips_cpu_die,
#endif
};

struct plat_smp_ops bmips5000_smp_ops = {
	.smp_setup		= bmips_smp_setup,
	.prepare_cpus		= bmips_prepare_cpus,
	.boot_secondary		= bmips_boot_secondary,
	.smp_finish		= bmips_smp_finish,
	.init_secondary		= bmips_init_secondary,
	.send_ipi_single	= bmips5000_send_ipi_single,
	.send_ipi_mask		= bmips5000_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= bmips_cpu_disable,
	.cpu_die		= bmips_cpu_die,
#endif
};

#endif /* CONFIG_SMP */
/***********************************************************************
 * BMIPS vector relocation
 * This is primarily used for SMP boot, but it is applicable to some
 * UP BMIPS systems as well.
 ***********************************************************************/
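/*
 * Copy a vector stub (start..end) to its run-time address and flush the
 * relevant caches so the new code is visible to instruction fetch.
 */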
static void bmips_wr_vec(unsigned long dst, char *start, char *end)
{
	memcpy((void *)dst, start, end - start);
	dma_cache_wback((unsigned long)start, end - start);
	local_flush_icache_range(dst, dst + (end - start));
	instruction_hazard();
}
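/*
 * Install the reset/NMI vector and the warm-restart vector; see the boot
 * and warm-restart sequences described in bmips_boot_secondary().
 */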
static inline void bmips_nmi_handler_setup(void)
{
	bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
		&bmips_reset_nmi_vec_end);
	bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
		&bmips_smp_int_vec_end);
}
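/*
 * Relocate the exception base on CPUs that support it, so that the normal
 * exception vectors do not collide with the reset/NMI stub installed at
 * 0x8000_0000 by bmips_nmi_handler_setup().  Wired up through
 * board_ebase_setup in bmips_smp_setup().
 */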
void bmips_ebase_setup(void)
{
	unsigned long new_ebase = ebase;
	void __iomem __maybe_unused *cbr;

	BUG_ON(ebase != CKSEG0);

	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
		/*
		 * BMIPS4350 cannot relocate the normal vectors, but it
		 * can relocate the BEV=1 vectors.  So CPU1 starts up at
		 * the relocated BEV=1, IV=0 general exception vector @
		 * 0xa000_0380.
		 *
		 * set_uncached_handler() is used here because:
		 *  - CPU1 will run this from uncached space
		 *  - None of the cacheflush functions are set up yet
		 */
		set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
			&bmips_smp_int_vec, 0x80);
		__sync();
		return;
	case CPU_BMIPS4380:
		/*
		 * 0x8000_0000: reset/NMI (initially in kseg1)
		 * 0x8000_0400: normal vectors
		 */
		new_ebase = 0x80000400;
		cbr = BMIPS_GET_CBR();
		__raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
		__raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
		break;
	case CPU_BMIPS5000:
		/*
		 * 0x8000_0000: reset/NMI (initially in kseg1)
		 * 0x8000_1000: normal vectors
		 */
		new_ebase = 0x80001000;
		write_c0_brcm_bootvec(0xa0088008);
		write_c0_ebase(new_ebase);
		if (max_cpus > 2)
			bmips_write_zscm_reg(0xa0, 0xa008a008);
		break;
	default:
		return;
	}

	board_nmi_handler_setup = &bmips_nmi_handler_setup;
	ebase = new_ebase;
}
asmlinkage void __weak plat_wired_tlb_setup(void)
{
	/*
	 * Called when starting/restarting a secondary CPU.
	 * Kernel stacks and other important data might only be accessible
	 * once the wired entries are present.
	 */
}