/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
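
/* Usage sketch (illustrative, not part of this file): callers normally go
 * through the public rwsem API rather than through these slow-path functions.
 * With a hypothetical semaphore called "my_sem":
 *
 *      static DECLARE_RWSEM(my_sem);     - or init_rwsem(&my_sem) at run time
 *
 *      down_read(&my_sem);               - shared; a contended acquire ends
 *      ... read the protected data ...     up in rwsem_down_read_failed()
 *      up_read(&my_sem);
 *
 *      down_write(&my_sem);              - exclusive; a contended acquire
 *      ... modify the protected data ...   ends up in rwsem_down_write_failed()
 *      up_write(&my_sem);
 *
 * The functions below are only reached from the architecture fast paths when
 * the semaphore is contended.
 */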

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

/* Wake types for __rwsem_do_wake().  Note that RWSEM_WAKE_NO_ACTIVE and
 * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
 * since the rwsem value was observed.
 */
#define RWSEM_WAKE_ANY        0 /* Wake whatever's at head of wait list */
#define RWSEM_WAKE_NO_ACTIVE  1 /* rwsem was observed with no active thread */
#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
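
/* Illustrative note: the bias values come from the per-architecture rwsem
 * headers; the 32-bit layout matching the 0x0000ffff/0xffff0000 split above
 * is (the fields are wider on 64-bit):
 *
 *      RWSEM_UNLOCKED_VALUE      0x00000000
 *      RWSEM_ACTIVE_BIAS         0x00000001
 *      RWSEM_ACTIVE_MASK         0x0000ffff
 *      RWSEM_WAITING_BIAS       (-0x00010000)
 *      RWSEM_ACTIVE_READ_BIAS    RWSEM_ACTIVE_BIAS
 *      RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * So, for example, one active reader gives a count of 0x00000001, one active
 * writer gives 0xffff0001 (WAITING_BIAS + ACTIVE_BIAS), and a lock that is
 * free but has waiters queued reads exactly RWSEM_WAITING_BIAS.  Note that
 * WAITING_BIAS is applied once when the wait list becomes non-empty, not
 * once per waiter.
 */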
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        signed long woken, loop, adjustment;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->type != RWSEM_WAITING_FOR_WRITE)
                goto readers_only;

        if (wake_type == RWSEM_WAKE_READ_OWNED)
                /* Another active reader was observed, so wakeup is not
                 * likely to succeed. Save the atomic op.
                 */
                goto out;

        /* Wake up the writing waiter and let the task grab the sem: */
        wake_up_process(waiter->task);
        goto out;

 readers_only:
        /* If we come here from up_xxxx(), another thread might have reached
         * rwsem_down_failed_common() before we acquired the spinlock and
         * woken up a waiter, making it now active.  We prefer to check for
         * this first in order to not spend too much time with the spinlock
         * held if we're not going to be able to wake up readers in the end.
         *
         * Note that we do not need to update the rwsem count: any writer
         * trying to acquire rwsem will run rwsem_down_write_failed() due
         * to the waiting threads and block trying to acquire the spinlock.
         *
         * We use a dummy atomic update in order to acquire the cache line
         * exclusively since we expect to succeed and run the final rwsem
         * count adjustment pretty soon.
         */
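        /* Concrete case: with waiters queued and nothing active, the count
         * reads exactly RWSEM_WAITING_BIAS.  A fast-path writer adds
         * RWSEM_ACTIVE_WRITE_BIAS (which carries another WAITING_BIAS) and
         * drags the value below RWSEM_WAITING_BIAS, whereas active readers
         * only add RWSEM_ACTIVE_BIAS each and leave it above.  A value below
         * RWSEM_WAITING_BIAS therefore means a writer has grabbed the lock.
         */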
        if (wake_type == RWSEM_WAKE_ANY &&
            rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
                /* Someone grabbed the sem for write already */
                goto out;

        /* Grant an infinite number of read locks to the readers at the front
         * of the queue.  Note we increment the 'active part' of the count by
         * the number of readers before waking any processes up.
         */
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                    struct rwsem_waiter, list);

        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
        if (waiter->type != RWSEM_WAITING_FOR_WRITE)
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;

        rwsem_atomic_add(adjustment, sem);

        next = sem->wait_list.next;
        for (loop = woken; loop > 0; loop--) {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        }

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;
}

/* Try to get write sem, caller holds sem->wait_lock: */
static int try_get_writer_sem(struct rw_semaphore *sem)
{
        long oldcount, adjustment;

        adjustment = RWSEM_ACTIVE_WRITE_BIAS;
        if (list_is_singular(&sem->wait_list))
                adjustment -= RWSEM_WAITING_BIAS;

try_again_write:
        oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
        if (!(oldcount & RWSEM_ACTIVE_MASK))
                return 1;
        /* someone grabbed the sem already */
        if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
                return 0;
        goto try_again_write;
}
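
/* Worked example for the lock stealing above (an illustrative reading of the
 * arithmetic, not new behaviour): the stealing writer applies
 * RWSEM_ACTIVE_WRITE_BIAS to mark itself active; if it is the only queued
 * waiter (list_is_singular()) it also removes the WAITING_BIAS it
 * contributed, leaving a net adjustment of RWSEM_ACTIVE_BIAS.  The steal only
 * stands if nobody was active beforehand, which is what the "oldcount &
 * RWSEM_ACTIVE_MASK" test checks.  On failure the adjustment is rolled back;
 * if the rollback finds the active part has meanwhile dropped to zero, the
 * steal is retried, otherwise the caller goes back to sleep.
 */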

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        signed long adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;
        signed long count;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there are no active locks, wake the front queued process(es). */
        if (count == RWSEM_WAITING_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        while (true) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
                schedule();
        }

        tsk->state = TASK_RUNNING;

        return sem;
}
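
/* Entry sketch (the fast path lives in the per-arch rwsem headers, not in
 * this file): roughly, __down_read() atomically adds RWSEM_ACTIVE_READ_BIAS
 * and only calls rwsem_down_read_failed() when the updated count shows
 * contention.  The slow path above therefore starts by backing that bias out
 * (adjustment = -RWSEM_ACTIVE_READ_BIAS) while queueing itself, and then
 * sleeps until __rwsem_do_wake() hands it the lock by clearing waiter.task.
 */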

/*
 * wait until we successfully acquire the write lock
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
        signed long adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;
        signed long count;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there were already threads queued before us and there are no
         * active writers, the lock must be read owned; so we try to wake
         * any read locks that were queued ahead of us.
         */
        if (count > RWSEM_WAITING_BIAS &&
            adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        /* wait until we successfully acquire the lock */
        while (true) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (try_get_writer_sem(sem))
                        break;

                raw_spin_unlock_irq(&sem->wait_lock);
                schedule();
                raw_spin_lock_irq(&sem->wait_lock);
        }

        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);

        tsk->state = TASK_RUNNING;

        return sem;
}
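
/* Unlike the read path, a queued writer is never handed the lock by the
 * waker: __rwsem_do_wake() merely wakes it, and the writer then claims the
 * count itself via try_get_writer_sem() (the lock stealing above), dequeueing
 * itself only once that succeeds.  The fast-path entry is analogous to the
 * read case: the arch-level write fast path applies RWSEM_ACTIVE_WRITE_BIAS
 * and falls back to rwsem_down_write_failed() when the lock turns out to be
 * contended.
 */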

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}
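
/* Usage note: rwsem_downgrade_wake() backs downgrade_write(), which turns a
 * held write lock into a read lock without ever releasing the semaphore.  An
 * illustrative pattern, again with a hypothetical "my_sem":
 *
 *      down_write(&my_sem);
 *      ... make the update ...
 *      downgrade_write(&my_sem);        - readers may now run alongside us
 *      ... keep reading the result ...
 *      up_read(&my_sem);
 */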

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);