/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
performing the various independent hardware delay and discovery operations
in a decoupled fashion rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
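
/*
 * A minimal sketch of the scheduling side of the pattern described above,
 * using a hypothetical driver; foo_init(), foo_probe() and foo_dev are
 * made-up names for illustration only:
 *
 *	static int __init foo_init(void)
 *	{
 *		// kick off the slow probe; boot continues in parallel
 *		async_schedule(foo_probe, &foo_dev);
 *
 *		// only needed because foo shares global resources with
 *		// drivers that still probe synchronously (see above)
 *		async_synchronize_full();
 *		return 0;
 *	}
 */
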
#include <linux/async.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

#define MAX_THREADS	256
#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

static int async_enabled = 0;

struct async_entry {
	struct list_head list;
	async_cookie_t cookie;
	async_func_ptr *func;
	void *data;
	struct list_head *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;

/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	struct async_entry *entry;

	if (!list_empty(running)) {
		entry = list_first_entry(running,
			struct async_entry, list);
		return entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list)
		if (entry->running == running)
			return entry->cookie;

	return next_cookie;	/* "infinity" value */
}

static async_cookie_t lowest_in_progress(struct list_head *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
{
	unsigned long flags;
	struct async_entry *entry;
	ktime_t calltime, delta, rettime;

	/* 1) pick one task from the pending queue */
	spin_lock_irqsave(&async_lock, flags);
	if (list_empty(&async_pending))
		goto out;
	entry = list_first_entry(&async_pending, struct async_entry, list);

	/* 2) move it to the running queue */
	list_move_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 3) run it (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 4) remove it from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 5) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 6) wake up any waiters */
	wake_up(&async_done);
	return;

out:
	spin_unlock_irqrestore(&async_lock, flags);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);
	wake_up(&async_new);
	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * A synchronization domain is identified by the running list @running;
 * passing @running to the async_synchronize_*_domain() functions waits
 * only on work scheduled in that domain rather than on all async work.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
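
/*
 * A sketch of using a private synchronization domain. The names
 * foo_async_domain, foo_probe_one(), foo_devs[] and FOO_NR_DEVS are
 * hypothetical and used for illustration only:
 *
 *	static LIST_HEAD(foo_async_domain);
 *
 *	static int __init foo_init(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < FOO_NR_DEVS; i++)
 *			async_schedule_domain(foo_probe_one, &foo_devs[i],
 *					      &foo_async_domain);
 *
 *		// wait only for foo's own async work, not for unrelated
 *		// async activity elsewhere in the kernel
 *		async_synchronize_full_domain(&foo_async_domain);
 *		return 0;
 *	}
 */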

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @list: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @list have been done.
 */
void async_synchronize_full_domain(struct list_head *list)
{
	async_synchronize_cookie_domain(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running that were
 * submitted prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie,
				     struct list_head *running)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk("async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
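
/*
 * A sketch of the checkpointing pattern from the callback side, with
 * hypothetical names (foo_probe(), foo_hw_init(), foo_register_devices()
 * are made up for illustration):
 *
 *	static void foo_probe(void *data, async_cookie_t cookie)
 *	{
 *		foo_hw_init(data);	// slow, order-independent work
 *
 *		// wait for all async work scheduled before us, so that the
 *		// externally visible registration happens in order
 *		async_synchronize_cookie(cookie);
 *		foo_register_devices(data);
 *	}
 */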

static int async_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int ret = HZ;
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * check the list head without the lock; false positives
		 * are dealt with inside run_one_entry() while holding
		 * the lock.
		 */
		rmb();
		if (!list_empty(&async_pending))
			run_one_entry();
		else
			ret = schedule_timeout(HZ);

		if (ret == 0) {
			/*
			 * We timed out, which means we as a thread are
			 * redundant. We sign off and die, but to avoid any
			 * races there is a last-straw check to see if work
			 * snuck in.
			 */
			atomic_dec(&thread_count);
			wmb(); /* manager must see our departure first */
			if (list_empty(&async_pending))
				break;
			/*
			 * Whoops, work came in between us timing out and us
			 * signing off; we need to stay alive and keep working.
			 */
			atomic_inc(&thread_count);
		}
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int async_manager_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int tc, ec;

		set_current_state(TASK_INTERRUPTIBLE);

		tc = atomic_read(&thread_count);
		rmb();
		ec = atomic_read(&entry_count);

		while (tc < ec && tc < MAX_THREADS) {
			if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
					       tc))) {
				msleep(100);
				continue;
			}
			atomic_inc(&thread_count);
			tc++;
		}

		schedule();
	}

	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int __init async_init(void)
{
	async_enabled =
		!IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr"));

	WARN_ON(!async_enabled);
	return 0;
}

core_initcall(async_init);