2012-12-18 04:01:36 +04:00
# include <linux/atomic.h>
2012-12-18 04:01:32 +04:00
# include <linux/rwsem.h>
# include <linux/percpu.h>
# include <linux/wait.h>
2012-12-18 04:01:38 +04:00
# include <linux/lockdep.h>
2012-12-18 04:01:32 +04:00
# include <linux/percpu-rwsem.h>
# include <linux/rcupdate.h>
# include <linux/sched.h>
# include <linux/errno.h>
2012-12-18 04:01:38 +04:00
int __percpu_init_rwsem ( struct percpu_rw_semaphore * brw ,
const char * name , struct lock_class_key * rwsem_key )
2012-12-18 04:01:32 +04:00
{
brw - > fast_read_ctr = alloc_percpu ( int ) ;
if ( unlikely ( ! brw - > fast_read_ctr ) )
return - ENOMEM ;
2012-12-18 04:01:38 +04:00
/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
__init_rwsem ( & brw - > rw_sem , name , rwsem_key ) ;
2015-08-21 20:42:57 +03:00
rcu_sync_init ( & brw - > rss , RCU_SCHED_SYNC ) ;
2012-12-18 04:01:32 +04:00
atomic_set ( & brw - > slow_read_ctr , 0 ) ;
init_waitqueue_head ( & brw - > write_waitq ) ;
return 0 ;
}
2015-09-01 06:21:59 +03:00
EXPORT_SYMBOL_GPL ( __percpu_init_rwsem ) ;
2012-12-18 04:01:32 +04:00
void percpu_free_rwsem ( struct percpu_rw_semaphore * brw )
{
2015-08-21 20:42:55 +03:00
/*
* XXX : temporary kludge . The error path in alloc_super ( )
* assumes that percpu_free_rwsem ( ) is safe after kzalloc ( ) .
*/
if ( ! brw - > fast_read_ctr )
return ;
2015-08-21 20:42:57 +03:00
rcu_sync_dtor ( & brw - > rss ) ;
2012-12-18 04:01:32 +04:00
free_percpu ( brw - > fast_read_ctr ) ;
brw - > fast_read_ctr = NULL ; /* catch use after free bugs */
}
/*
 * This is the fast-path for down_read/up_read. If it succeeds we rely
 * on the barriers provided by rcu_sync_enter/exit; see the comments in
 * percpu_down_write() and percpu_up_write().
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 *
 * @val is +1 on down_read, -1 on up_read.
 * Returns true if the per-cpu counter was adjusted (fast path taken),
 * false if the caller must fall back to ->rw_sem / ->slow_read_ctr.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success;

	/*
	 * Disabling preemption keeps the rcu_sync_is_idle() check and the
	 * counter update on one CPU and, with RCU_SCHED_SYNC (see
	 * __percpu_init_rwsem), inside a region the writer's grace period
	 * waits for.
	 */
	preempt_disable();
	success = rcu_sync_is_idle(&brw->rss);
	if (likely(success))
		__this_cpu_add(*brw->fast_read_ctr, val);
	preempt_enable();

	return success;
}
/*
 * Acquire the lock for reading.
 *
 * Like the normal down_read() this is not recursive, the writer can
 * come after the first percpu_down_read() and create the deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);

	/* fast path: no writer around, this CPU's counter was bumped */
	if (likely(update_fast_ctr(brw, +1)))
		return;

	/*
	 * Slow path: a writer is active or pending. Wait on ->rw_sem,
	 * account ourselves in ->slow_read_ctr, then drop the rwsem;
	 * percpu_up_read() will decrement ->slow_read_ctr.
	 *
	 * Avoid rwsem_acquire_read() and rwsem_release(): lockdep already
	 * saw the acquisition above.
	 */
	__down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	__up_read(&brw->rw_sem);
}
2015-09-01 06:21:59 +03:00
EXPORT_SYMBOL_GPL ( percpu_down_read ) ;
2012-12-18 04:01:32 +04:00
2015-07-21 18:45:57 +03:00
int percpu_down_read_trylock ( struct percpu_rw_semaphore * brw )
{
if ( unlikely ( ! update_fast_ctr ( brw , + 1 ) ) ) {
if ( ! __down_read_trylock ( & brw - > rw_sem ) )
return 0 ;
atomic_inc ( & brw - > slow_read_ctr ) ;
__up_read ( & brw - > rw_sem ) ;
}
rwsem_acquire_read ( & brw - > rw_sem . dep_map , 0 , 1 , _RET_IP_ ) ;
return 1 ;
}
2012-12-18 04:01:32 +04:00
/*
 * Release the lock taken by percpu_down_read() /
 * percpu_down_read_trylock(); drops the lockdep dependency recorded
 * there.
 */
void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	/* fast path: this CPU's counter was decremented, nothing to wake */
	if (likely(update_fast_ctr(brw, -1)))
		return;

	/*
	 * Slow path: we were accounted in ->slow_read_ctr (either by our
	 * own down_read slow path or by the writer draining the per-cpu
	 * counters); wake the writer when the last reader is gone.
	 *
	 * false-positive is possible but harmless
	 */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
2015-09-01 06:21:59 +03:00
EXPORT_SYMBOL_GPL ( percpu_up_read ) ;
2012-12-18 04:01:32 +04:00
/*
 * Drain every CPU's fast counter into a single sum and reset the
 * counters to zero. Called by percpu_down_write() after the fast path
 * has been disabled, so nobody updates ->fast_read_ctr concurrently.
 */
static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		int *ctr = &per_cpu(*brw->fast_read_ctr, cpu);

		total += *ctr;
		*ctr = 0;
	}

	return total;
}
/*
 * Acquire the lock for writing: shut down the readers' fast path, take
 * ->rw_sem exclusively, then wait until every in-flight reader has left
 * its critical section.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/*
	 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
	 * percpu_down_read() and percpu_up_read(), and wait for gp pass.
	 *
	 * The latter synchronises us with the preceding readers which used
	 * the fast-path, so we can not miss the result of __this_cpu_add()
	 * or anything else inside their critical sections.
	 */
	rcu_sync_enter(&brw->rss);

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}
2015-09-01 06:21:59 +03:00
EXPORT_SYMBOL_GPL ( percpu_down_write ) ;
2012-12-18 04:01:32 +04:00
/*
 * Release the lock taken by percpu_down_write() and, after a grace
 * period, re-enable the readers' fast path.
 */
void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);

	/*
	 * Enable the fast-path in percpu_down_read() and percpu_up_read()
	 * but only after another gp pass; this adds the necessary barrier
	 * to ensure the reader can't miss the changes done by us.
	 */
	rcu_sync_exit(&brw->rss);
}
2015-09-01 06:21:59 +03:00
EXPORT_SYMBOL_GPL ( percpu_up_write ) ;