/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks (a sketch follows this comment).
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
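/*
 * Illustrative sketch (not part of the original file): a softirq handler
 * that needs serialization can take its own lock, as described above.
 * The my_lock, my_queue and my_process_queue names are hypothetical:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		spin_lock(&my_lock);
 *		my_process_queue(&my_queue);
 *		spin_unlock(&my_lock);
 *	}
 */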
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);
void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
void local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());
#endif
	WARN_ON_ONCE(irqs_disabled());

#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
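/*
 * Illustrative usage sketch (not part of the original file): code that
 * shares data with softirq context on the local CPU typically brackets the
 * critical section with local_bh_disable()/local_bh_enable().  The
 * my_percpu_count variable below is hypothetical:
 *
 *	local_bh_disable();
 *	__get_cpu_var(my_percpu_count)++;
 *	local_bh_enable();
 */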
void local_bh_enable_ip(unsigned long ip)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable_ip);
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
#ifdef CONFIG_NO_HZ
	int cpu = smp_processor_id();
	if (idle_cpu(cpu) && !in_interrupt())
		tick_nohz_stop_idle(cpu);
#endif
	__irq_enter();
#ifdef CONFIG_NO_HZ
	if (idle_cpu(cpu))
		tick_nohz_update_jiffies();
#endif
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
		tick_nohz_stop_sched_tick();
	rcu_irq_exit();
#endif
	preempt_enable_no_resched();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}
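/*
 * Illustrative usage sketch (not part of the original file): a subsystem
 * that owns a softirq slot registers its handler once and raises the
 * softirq whenever it has work.  MY_SOFTIRQ and my_softirq_action are
 * hypothetical names:
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action, NULL);
 *	...
 *	raise_softirq(MY_SOFTIRQ);
 *
 * With interrupts already disabled, raise_softirq_irqoff() can be used
 * directly, as the tasklet code below does.
 */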
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
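/*
 * Illustrative usage sketch (not part of the original file): a driver
 * typically initializes its tasklet once (with tasklet_init() or statically
 * via DECLARE_TASKLET()), schedules it from its interrupt handler, and
 * kills it on teardown.  The my_dev/my_tasklet_func names are hypothetical:
 *
 *	tasklet_init(&my_dev->tasklet, my_tasklet_func, (unsigned long)my_dev);
 *	...
 *	tasklet_schedule(&my_dev->tasklet);
 *	...
 *	tasklet_kill(&my_dev->tasklet);
 */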
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
static int ksoftirqd(void *__bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
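/*
 * Illustrative usage sketch (not part of the original file): run a handler
 * on every CPU and wait for completion.  my_reset_counters is a hypothetical
 * void (*)(void *) callback; the retry argument is ignored above:
 *
 *	on_each_cpu(my_reset_counters, NULL, 0, 1);
 */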
#endif