/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
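
/*
 * Illustrative sketch, not part of this file: wakeme_after_rcu() is one
 * instance of the general call_rcu() pattern, in which a struct rcu_head
 * is embedded in the enclosing object and container_of() recovers that
 * object in the callback.  The names "struct foo", "foo_reclaim" and
 * "foo_defer_free" below are hypothetical.
 */
#if 0	/* example only */
struct foo {
	struct rcu_head rcu;
	int a;
};

static void foo_reclaim(struct rcu_head *rp)
{
	struct foo *fp = container_of(rp, struct foo, rcu);

	kfree(fp);	/* safe: a full grace period has elapsed */
}

/* Queue fp for reclamation once all pre-existing readers are done. */
static void foo_defer_free(struct foo *fp)
{
	call_rcu(&fp->rcu, foo_reclaim);
}
#endif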

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);

	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
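
/*
 * Illustrative sketch, not part of this file, reusing the hypothetical
 * struct foo above: a reader dereferences an RCU-protected pointer
 * between rcu_read_lock() and rcu_read_unlock(), while the updater
 * publishes a replacement with rcu_assign_pointer() and calls
 * synchronize_rcu() before freeing the old version.  "gbl_foo" and
 * "foo_lock" are made-up names.
 */
#if 0	/* example only */
static struct foo *gbl_foo;
static DEFINE_SPINLOCK(foo_lock);

static int foo_get_a(void)
{
	int retval;

	rcu_read_lock();	/* begin read-side critical section */
	retval = rcu_dereference(gbl_foo)->a;
	rcu_read_unlock();	/* end read-side critical section */
	return retval;
}

static void foo_update(struct foo *new_fp)
{
	struct foo *old_fp;

	spin_lock(&foo_lock);			/* serialize updaters */
	old_fp = gbl_foo;
	rcu_assign_pointer(gbl_foo, new_fp);	/* publish the new version */
	spin_unlock(&foo_lock);
	synchronize_rcu();	/* wait out all pre-existing readers */
	kfree(old_fp);		/* no reader can still hold a reference */
}
#endif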

static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *notused)
{
	int cpu = smp_processor_id();
	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu(head, rcu_barrier_callback);
}

/**
 * rcu_barrier - Wait until all in-flight RCU callbacks are complete.
 */
void rcu_barrier(void)
{
	BUG_ON(in_interrupt());
	/* Take rcu_barrier_mutex to protect against CPU hotplug. */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	atomic_set(&rcu_barrier_cpu_count, 0);
	/*
	 * The queueing of callbacks in all CPUs must be atomic with
	 * respect to RCU, otherwise one CPU may queue a callback,
	 * wait for a grace period, decrement barrier count and call
	 * complete(), while other CPUs have not yet queued anything.
	 * So, we need to make sure that grace periods cannot complete
	 * until all the callbacks are queued.
	 */
	rcu_read_lock();
	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
	rcu_read_unlock();
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
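
/*
 * Illustrative sketch, not part of this file: any module that passes
 * its own functions to call_rcu(), as in the hypothetical foo_reclaim()
 * above, must invoke rcu_barrier() on unload; otherwise a still-queued
 * callback could run after the module's text has been freed.
 * "foo_cleanup_module" is a made-up name.
 */
#if 0	/* example only */
static void __exit foo_cleanup_module(void)
{
	/* ...first prevent any further foo_defer_free() calls... */
	rcu_barrier();	/* then wait for every queued callback to finish */
}
#endif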

void __init rcu_init(void)
{
	__rcu_init();
}