/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) IBM Corp. 1999, 2006
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com, barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);
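
/*
 * An illustrative note, not extra code: the sigp helpers are the one
 * place where the physical cpu address matters. The translation from
 * logical to physical presumably goes through __cpu_logical_map,
 * roughly:
 *
 *	physical = __cpu_logical_map[logical_cpu];
 *	(issue the sigp order against that physical address)
 */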

/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}

static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context,
	 * caller must disable preemption
	 */
	WARN_ON(irqs_disabled() || in_irq() || preemptible());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock_bh(&call_lock);
	call_data = &data;
	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();

	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();

	spin_unlock_bh(&call_lock);

out:
	local_irq_disable();
	if (local)
		func(info);
	local_irq_enable();
}

/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. Must be called with preemption disabled.
 * You may call it from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
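
/*
 * Usage sketch (illustrative only, not part of this file): running a
 * fast, non-blocking handler on all other cpus and waiting for it to
 * finish. do_flush() is a hypothetical handler named just for this
 * example; it mirrors the smp_ptlb_callback() pattern further below.
 *
 *	static void do_flush(void *info)
 *	{
 *		local_flush_tlb();
 *	}
 *
 *	preempt_disable();
 *	smp_call_function(do_flush, NULL, 0, 1);
 *	preempt_enable();
 */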

/*
 * smp_call_function_on:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 * @cpu: the CPU where func should run
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. Must be called with preemption disabled.
 * You may call it from a bottom half.
 */
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
			 int wait, int cpu)
{
	cpumask_t map = CPU_MASK_NONE;

	cpu_set(cpu, map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
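
/*
 * Usage sketch (illustrative only): directing the hypothetical do_flush()
 * handler from the example above at a single cpu; target_cpu is likewise
 * hypothetical. Preemption must be disabled here as well:
 *
 *	preempt_disable();
 *	if (cpu_online(target_cpu))
 *		smp_call_function_on(do_flush, NULL, 0, 1, target_cpu);
 *	preempt_enable();
 */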

static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
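
/*
 * The signaling handshake above, spelled out (an explanatory sketch, not
 * extra code): the sender publishes its request as a bit in the target's
 * lowcore and then kicks the target with an emergency-signal sigp. The
 * receiver's external interrupt handler fetches and clears all pending
 * bits atomically with xchg(), so requests that pile up before the
 * interrupt is delivered are handled in a single pass:
 *
 *	sender					receiver
 *	set_bit(sig, ...->ext_call_fast);
 *	signal_processor(cpu, ...);	-->	do_ext_call_interrupt()
 *						bits = xchg(..., 0);
 *						if (test_bit(ec_call_function,
 *							     &bits))
 *							do_call_function();
 */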

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
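
/*
 * Usage sketch (illustrative only): the or/and mask pairs above let one
 * callback set and clear bits in a single pass over the 16 control
 * registers. Enabling the extended save area on every cpu (compare the
 * ctl_set_bit(14, 29) call in smp_prepare_cpus() below) would be just
 *
 *	smp_ctl_set_bit(14, 29);
 *
 * which puts 1 << 29 into orvals[14] while the all-ones andvals masks
 * leave every other register and bit untouched.
 */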

/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		/* Use logical cpu 1 as a scratch slot to sense the address. */
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		num_cpus++;
	}
	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
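
/*
 * Usage sketch (illustrative only): a reservation taken with
 * smp_get_cpu() keeps the cpu from being taken offline (__cpu_disable()
 * below returns -EBUSY while the count is non-zero) and must be paired
 * with smp_put_cpu():
 *
 *	int cpu = smp_get_cpu(CPU_MASK_ALL);
 *
 *	if (cpu >= 0) {
 *		(do work that must stay on a reserved, online cpu)
 *		smp_put_cpu(cpu);
 *	}
 */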

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
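
/*
 * An explanatory note on __cpu_up() (a sketch, not extra code): the
 * target cpu's prefix register is pointed at its prepared lowcore via
 * sigp_set_prefix, a pristine stack frame for the idle task is built,
 * and sigp_restart then makes the target resume, presumably through the
 * restart PSW in that lowcore, inside start_secondary(); the caller
 * finally spins until the new cpu has marked itself online.
 */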

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

int __cpu_disable(void)
{
	unsigned long flags;
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		c->hotpluggable = 1;
		ret = register_cpu(c, cpu);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}
subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);