/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};
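
/*
 * With this generic implementation, sem->count encodes the holders (as
 * can be read off the checks below):
 *    0 - the semaphore is free
 *   >0 - the number of active readers
 *   -1 - held by a single writer
 */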

int rwsem_is_locked(struct rw_semaphore *sem)
{
	int ret = 1;
	unsigned long flags;

	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
		ret = (sem->count != 0);
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	}
	return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);
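
/*
 * A minimal usage sketch (hypothetical caller, not part of this file).
 * Note that the answer is advisory only: the state may change as soon
 * as wait_lock is dropped.
 *
 *	static DECLARE_RWSEM(my_sem);		// hypothetical semaphore
 *
 *	static void report_state(void)
 *	{
 *		if (rwsem_is_locked(&my_sem))
 *			pr_info("my_sem is currently held\n");
 *	}
 */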

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
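
/*
 * A minimal initialization sketch (hypothetical caller): users normally
 * go through the init_rwsem() wrapper, which supplies the name and lock
 * class key for lockdep and lands here.
 *
 *	static struct rw_semaphore my_sem;	// hypothetical semaphore
 *
 *	static int __init my_init(void)
 *	{
 *		init_rwsem(&my_sem);
 *		return 0;
 *	}
 */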

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wakewrite)
			/* Wake up a writer. Note that we do not grant it the
			 * lock - it will have to acquire it when it runs. */
			wake_up_process(waiter->task);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
	woken = 0;
	do {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		/*
		 * Make sure we do not wakeup the next reader before
		 * setting the nil condition to grant the next reader;
		 * otherwise we could miss the wakeup on the other
		 * side and end up sleeping again. See the pairing
		 * in rwsem_down_read_failed().
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (next == &sem->wait_list)
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	sem->count += woken;

out:
	return sem;
}
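
/*
 * The smp_mb() pairing above, laid side by side (both halves already
 * live in this file; nothing new is introduced here):
 *
 *	waker (__rwsem_do_wake)		sleeper (__down_read)
 *	-----------------------		---------------------
 *	smp_mb();			for (;;) {
 *	waiter->task = NULL;			if (!waiter.task)
 *	wake_up_process(tsk);				break;
 *					schedule();
 *
 * The sleeper keys purely off waiter.task becoming NULL, so the grant
 * (the NULL store) must not be reordered ahead of the list bookkeeping
 * that precedes it.
 */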

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	wake_up_process(waiter->task);

	return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->count++;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	__set_task_state(tsk, TASK_RUNNING);
out:
	;
}
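
/*
 * A minimal usage sketch (hypothetical caller and data, not part of
 * this file): the public down_read() wrapper lands in __down_read() on
 * this configuration.
 *
 *	static DECLARE_RWSEM(data_sem);		// hypothetical semaphore
 *	static int shared_value;		// hypothetical shared data
 *
 *	static int read_value(void)
 *	{
 *		int v;
 *
 *		down_read(&data_sem);		// may sleep here
 *		v = shared_value;
 *		up_read(&data_sem);
 *		return v;
 *	}
 */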

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->count++;
		ret = 1;
	}

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}
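
/*
 * A minimal usage sketch (hypothetical caller), useful from contexts
 * that must not sleep:
 *
 *	if (down_read_trylock(&data_sem)) {	// hypothetical semaphore
 *		// ... read shared state ...
 *		up_read(&data_sem);
 *	}
 */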

/*
 * get a write lock on the semaphore
 */
int __sched __down_write_common(struct rw_semaphore *sem, int state)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* set up my own style of waitqueue */
	tsk = current;
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* wait for someone to release the lock */
	for (;;) {
		/*
		 * This is the key to write lock stealing: the task
		 * already on the CPU checks for the lock straight away
		 * rather than putting itself to sleep and waiting to be
		 * woken by the system or by someone at the head of the
		 * wait list.
		 */
		if (sem->count == 0)
			break;
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto out;
		}
		set_task_state(tsk, state);
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		schedule();
		raw_spin_lock_irqsave(&sem->wait_lock, flags);
	}
	/* got the lock */
	sem->count = -1;
out:
	list_del(&waiter.list);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}

int __sched __down_write_killable(struct rw_semaphore *sem)
{
	return __down_write_common(sem, TASK_KILLABLE);
}
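
/*
 * A minimal usage sketch (hypothetical caller): the killable variant
 * lets a fatal signal abort the wait, so its return value must be
 * checked.
 *
 *	if (down_write_killable(&data_sem))	// hypothetical semaphore
 *		return -EINTR;			// killed while waiting
 *	shared_value = 42;			// hypothetical update
 *	up_write(&data_sem);
 */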

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count == 0) {
		/* got the lock */
		sem->count = -1;
		ret = 1;
	}

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->count == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	sem->count = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	sem->count = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
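
/*
 * A minimal usage sketch (hypothetical caller): downgrade once the
 * exclusive part of an update is done, keeping read access with no
 * window in which another writer could slip in.
 *
 *	down_write(&data_sem);			// hypothetical semaphore
 *	shared_value = recompute();		// hypothetical helpers
 *	downgrade_write(&data_sem);		// now held for read
 *	use(shared_value);
 *	up_read(&data_sem);
 */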