/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
/***
* mutex_init - initialize the mutex
* @ lock : the mutex to be initialized
*
* Initialize the mutex to unlocked state .
*
* It is not allowed to initialize an already locked mutex .
*/
2006-07-03 11:24:55 +04:00
void
__mutex_init ( struct mutex * lock , const char * name , struct lock_class_key * key )
2006-01-10 02:59:19 +03:00
{
atomic_set ( & lock - > count , 1 ) ;
spin_lock_init ( & lock - > wait_lock ) ;
INIT_LIST_HEAD ( & lock - > wait_list ) ;
2006-07-03 11:24:55 +04:00
debug_mutex_init ( lock , name , key ) ;
2006-01-10 02:59:19 +03:00
}
EXPORT_SYMBOL ( __mutex_init ) ;
2007-10-12 00:11:12 +04:00
# ifndef CONFIG_DEBUG_LOCK_ALLOC
2006-01-10 02:59:19 +03:00
/*
* We split the mutex lock / unlock logic into separate fastpath and
* slowpath functions , to reduce the register pressure on the fastpath .
* We also put the fastpath first in the kernel image , to make sure the
* branch is predicted by the CPU as default - untaken .
*/
2008-02-08 15:19:53 +03:00
static void noinline __sched
2006-07-03 11:24:33 +04:00
__mutex_lock_slowpath ( atomic_t * lock_count ) ;
2006-01-10 02:59:19 +03:00
/***
* mutex_lock - acquire the mutex
* @ lock : the mutex to be acquired
*
* Lock the mutex exclusively for this task . If the mutex is not
* available right now , it will sleep until it can get it .
*
* The mutex must later on be released by the same task that
* acquired it . Recursive locking is not allowed . The task
* may not exit without first unlocking the mutex . Also , kernel
* memory where the mutex resides mutex must not be freed with
* the mutex still locked . The mutex must first be initialized
* ( or statically defined ) before it can be locked . memset ( ) - ing
* the mutex to 0 is not allowed .
*
* ( The CONFIG_DEBUG_MUTEXES . config option turns on debugging
* checks that will enforce the restrictions and will also do
* deadlock debugging . )
*
* This function is similar to ( but not equivalent to ) down ( ) .
*/
2008-02-08 15:19:53 +03:00
void inline __sched mutex_lock ( struct mutex * lock )
2006-01-10 02:59:19 +03:00
{
2006-01-11 00:10:36 +03:00
might_sleep ( ) ;
2006-01-10 02:59:19 +03:00
/*
* The locking fastpath is the 1 - > 0 transition from
* ' unlocked ' into ' locked ' state .
*/
__mutex_fastpath_lock ( & lock - > count , __mutex_lock_slowpath ) ;
}
EXPORT_SYMBOL ( mutex_lock ) ;
2007-10-12 00:11:12 +04:00
# endif
2006-01-10 02:59:19 +03:00
2008-02-08 15:19:53 +03:00
static noinline void __sched __mutex_unlock_slowpath ( atomic_t * lock_count ) ;
2006-01-10 02:59:19 +03:00
/***
* mutex_unlock - release the mutex
* @ lock : the mutex to be released
*
* Unlock a mutex that has been locked by this task previously .
*
* This function must not be used in interrupt context . Unlocking
* of a not locked mutex is not allowed .
*
* This function is similar to ( but not equivalent to ) up ( ) .
*/
2008-02-08 15:19:53 +03:00
void __sched mutex_unlock ( struct mutex * lock )
2006-01-10 02:59:19 +03:00
{
/*
* The unlocking fastpath is the 0 - > 1 transition from ' locked '
* into ' unlocked ' state :
*/
__mutex_fastpath_unlock ( & lock - > count , __mutex_unlock_slowpath ) ;
}
EXPORT_SYMBOL ( mutex_unlock ) ;
/*
* Lock a mutex ( possibly interruptible ) , slowpath :
*/
static inline int __sched
2007-10-12 00:11:12 +04:00
__mutex_lock_common ( struct mutex * lock , long state , unsigned int subclass ,
unsigned long ip )
2006-01-10 02:59:19 +03:00
{
struct task_struct * task = current ;
struct mutex_waiter waiter ;
unsigned int old_val ;
2006-06-26 11:24:31 +04:00
unsigned long flags ;
2006-01-10 02:59:19 +03:00
2006-06-26 11:24:31 +04:00
spin_lock_mutex ( & lock - > wait_lock , flags ) ;
2006-01-10 02:59:19 +03:00
2006-07-03 11:24:33 +04:00
debug_mutex_lock_common ( lock , & waiter ) ;
2007-10-12 00:11:12 +04:00
mutex_acquire ( & lock - > dep_map , subclass , 0 , ip ) ;
2007-05-09 13:35:16 +04:00
debug_mutex_add_waiter ( lock , & waiter , task_thread_info ( task ) ) ;
2006-01-10 02:59:19 +03:00
/* add waiting tasks to the end of the waitqueue (FIFO): */
list_add_tail ( & waiter . list , & lock - > wait_list ) ;
waiter . task = task ;
2007-07-19 12:48:58 +04:00
old_val = atomic_xchg ( & lock - > count , - 1 ) ;
if ( old_val = = 1 )
goto done ;
2007-10-12 00:11:12 +04:00
lock_contended ( & lock - > dep_map , ip ) ;
2007-07-19 12:48:58 +04:00
2006-01-10 02:59:19 +03:00
for ( ; ; ) {
/*
* Lets try to take the lock again - this is needed even if
* we get here for the first time ( shortly after failing to
* acquire the lock ) , to make sure that we get a wakeup once
* it ' s unlocked . Later on , if we sleep , this is the
* operation that gives us the lock . We xchg it to - 1 , so
* that when we release the lock , we properly wake up the
* other waiters :
*/
old_val = atomic_xchg ( & lock - > count , - 1 ) ;
if ( old_val = = 1 )
break ;
/*
* got a signal ? ( This code gets eliminated in the
* TASK_UNINTERRUPTIBLE case . )
*/
2007-12-07 01:37:59 +03:00
if ( unlikely ( ( state = = TASK_INTERRUPTIBLE & &
signal_pending ( task ) ) | |
( state = = TASK_KILLABLE & &
fatal_signal_pending ( task ) ) ) ) {
mutex_remove_waiter ( lock , & waiter ,
task_thread_info ( task ) ) ;
2007-10-12 00:11:12 +04:00
mutex_release ( & lock - > dep_map , 1 , ip ) ;
2006-06-26 11:24:31 +04:00
spin_unlock_mutex ( & lock - > wait_lock , flags ) ;
2006-01-10 02:59:19 +03:00
debug_mutex_free_waiter ( & waiter ) ;
return - EINTR ;
}
__set_task_state ( task , state ) ;
/* didnt get the lock, go to sleep: */
2006-06-26 11:24:31 +04:00
spin_unlock_mutex ( & lock - > wait_lock , flags ) ;
2006-01-10 02:59:19 +03:00
schedule ( ) ;
2006-06-26 11:24:31 +04:00
spin_lock_mutex ( & lock - > wait_lock , flags ) ;
2006-01-10 02:59:19 +03:00
}
2007-07-19 12:48:58 +04:00
done :
2007-07-19 12:49:00 +04:00
lock_acquired ( & lock - > dep_map ) ;
2006-01-10 02:59:19 +03:00
/* got the lock - rejoice! */
2007-05-09 13:35:16 +04:00
mutex_remove_waiter ( lock , & waiter , task_thread_info ( task ) ) ;
debug_mutex_set_owner ( lock , task_thread_info ( task ) ) ;
2006-01-10 02:59:19 +03:00
/* set it to 0 if there are no waiters left: */
if ( likely ( list_empty ( & lock - > wait_list ) ) )
atomic_set ( & lock - > count , 0 ) ;
2006-06-26 11:24:31 +04:00
spin_unlock_mutex ( & lock - > wait_lock , flags ) ;
2006-01-10 02:59:19 +03:00
debug_mutex_free_waiter ( & waiter ) ;
return 0 ;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep-annotated variant of mutex_lock(): @subclass distinguishes
 * nesting levels of the same lock class.
 */
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
2006-12-08 13:36:17 +03:00
2007-12-07 01:37:59 +03:00
int __sched
mutex_lock_killable_nested ( struct mutex * lock , unsigned int subclass )
{
might_sleep ( ) ;
return __mutex_lock_common ( lock , TASK_KILLABLE , subclass , _RET_IP_ ) ;
}
EXPORT_SYMBOL_GPL ( mutex_lock_killable_nested ) ;
2006-12-08 13:36:17 +03:00
int __sched
mutex_lock_interruptible_nested ( struct mutex * lock , unsigned int subclass )
{
might_sleep ( ) ;
2007-10-12 00:11:12 +04:00
return __mutex_lock_common ( lock , TASK_INTERRUPTIBLE , subclass , _RET_IP_ ) ;
2006-12-08 13:36:17 +03:00
}
EXPORT_SYMBOL_GPL ( mutex_lock_interruptible_nested ) ;
2006-07-03 11:24:55 +04:00
# endif
2006-01-10 02:59:19 +03:00
/*
* Release the lock , slowpath :
*/
2008-02-08 15:19:53 +03:00
static inline void
2006-07-03 11:24:55 +04:00
__mutex_unlock_common_slowpath ( atomic_t * lock_count , int nested )
2006-01-10 02:59:19 +03:00
{
2006-01-11 01:15:02 +03:00
struct mutex * lock = container_of ( lock_count , struct mutex , count ) ;
2006-06-26 11:24:31 +04:00
unsigned long flags ;
2006-01-10 02:59:19 +03:00
2006-06-26 11:24:31 +04:00
spin_lock_mutex ( & lock - > wait_lock , flags ) ;
2006-07-03 11:24:55 +04:00
mutex_release ( & lock - > dep_map , nested , _RET_IP_ ) ;
2006-07-03 11:24:33 +04:00
debug_mutex_unlock ( lock ) ;
2006-01-10 02:59:19 +03:00
/*
* some architectures leave the lock unlocked in the fastpath failure
* case , others need to leave it locked . In the later case we have to
* unlock it here
*/
if ( __mutex_slowpath_needs_to_unlock ( ) )
atomic_set ( & lock - > count , 1 ) ;
if ( ! list_empty ( & lock - > wait_list ) ) {
/* get the first entry from the wait-list: */
struct mutex_waiter * waiter =
list_entry ( lock - > wait_list . next ,
struct mutex_waiter , list ) ;
debug_mutex_wake_waiter ( lock , waiter ) ;
wake_up_process ( waiter - > task ) ;
}
debug_mutex_clear_owner ( lock ) ;
2006-06-26 11:24:31 +04:00
spin_unlock_mutex ( & lock - > wait_lock , flags ) ;
2006-01-10 02:59:19 +03:00
}
2006-07-03 11:24:33 +04:00
/*
* Release the lock , slowpath :
*/
2008-02-08 15:19:53 +03:00
static noinline void
2006-07-03 11:24:33 +04:00
__mutex_unlock_slowpath ( atomic_t * lock_count )
{
2006-07-03 11:24:55 +04:00
__mutex_unlock_common_slowpath ( lock_count , 1 ) ;
2006-07-03 11:24:33 +04:00
}
2007-10-12 00:11:12 +04:00
# ifndef CONFIG_DEBUG_LOCK_ALLOC
2006-01-10 02:59:19 +03:00
/*
* Here come the less common ( and hence less performance - critical ) APIs :
* mutex_lock_interruptible ( ) and mutex_trylock ( ) .
*/
2008-02-08 15:19:53 +03:00
static noinline int __sched
2007-12-07 01:37:59 +03:00
__mutex_lock_killable_slowpath ( atomic_t * lock_count ) ;
2008-02-08 15:19:53 +03:00
static noinline int __sched
2006-07-03 11:24:33 +04:00
__mutex_lock_interruptible_slowpath ( atomic_t * lock_count ) ;
2006-01-10 02:59:19 +03:00
/***
* mutex_lock_interruptible - acquire the mutex , interruptable
* @ lock : the mutex to be acquired
*
* Lock the mutex like mutex_lock ( ) , and return 0 if the mutex has
* been acquired or sleep until the mutex becomes available . If a
* signal arrives while waiting for the lock then this function
* returns - EINTR .
*
* This function is similar to ( but not equivalent to ) down_interruptible ( ) .
*/
2008-02-08 15:19:53 +03:00
int __sched mutex_lock_interruptible ( struct mutex * lock )
2006-01-10 02:59:19 +03:00
{
2006-01-11 00:10:36 +03:00
might_sleep ( ) ;
2006-01-10 02:59:19 +03:00
return __mutex_fastpath_lock_retval
( & lock - > count , __mutex_lock_interruptible_slowpath ) ;
}
EXPORT_SYMBOL ( mutex_lock_interruptible ) ;
2008-02-08 15:19:53 +03:00
int __sched mutex_lock_killable ( struct mutex * lock )
2007-12-07 01:37:59 +03:00
{
might_sleep ( ) ;
return __mutex_fastpath_lock_retval
( & lock - > count , __mutex_lock_killable_slowpath ) ;
}
EXPORT_SYMBOL ( mutex_lock_killable ) ;
2008-02-08 15:19:53 +03:00
static noinline void __sched
2007-10-12 00:11:12 +04:00
__mutex_lock_slowpath ( atomic_t * lock_count )
{
struct mutex * lock = container_of ( lock_count , struct mutex , count ) ;
__mutex_lock_common ( lock , TASK_UNINTERRUPTIBLE , 0 , _RET_IP_ ) ;
}
2008-02-08 15:19:53 +03:00
static noinline int __sched
2007-12-07 01:37:59 +03:00
__mutex_lock_killable_slowpath ( atomic_t * lock_count )
{
struct mutex * lock = container_of ( lock_count , struct mutex , count ) ;
return __mutex_lock_common ( lock , TASK_KILLABLE , 0 , _RET_IP_ ) ;
}
2008-02-08 15:19:53 +03:00
static noinline int __sched
2006-07-03 11:24:33 +04:00
__mutex_lock_interruptible_slowpath ( atomic_t * lock_count )
2006-01-10 02:59:19 +03:00
{
struct mutex * lock = container_of ( lock_count , struct mutex , count ) ;
2007-10-12 00:11:12 +04:00
return __mutex_lock_common ( lock , TASK_INTERRUPTIBLE , 0 , _RET_IP_ ) ;
2006-01-10 02:59:19 +03:00
}
2007-10-12 00:11:12 +04:00
# endif
2006-01-10 02:59:19 +03:00
/*
* Spinlock based trylock , we take the spinlock and check whether we
* can get the lock :
*/
static inline int __mutex_trylock_slowpath ( atomic_t * lock_count )
{
struct mutex * lock = container_of ( lock_count , struct mutex , count ) ;
2006-06-26 11:24:31 +04:00
unsigned long flags ;
2006-01-10 02:59:19 +03:00
int prev ;
2006-06-26 11:24:31 +04:00
spin_lock_mutex ( & lock - > wait_lock , flags ) ;
2006-01-10 02:59:19 +03:00
prev = atomic_xchg ( & lock - > count , - 1 ) ;
2006-07-03 11:24:55 +04:00
if ( likely ( prev = = 1 ) ) {
2006-07-03 11:24:33 +04:00
debug_mutex_set_owner ( lock , current_thread_info ( ) ) ;
2006-07-03 11:24:55 +04:00
mutex_acquire ( & lock - > dep_map , 0 , 1 , _RET_IP_ ) ;
}
2006-01-10 02:59:19 +03:00
/* Set it back to 0 if there are no waiters: */
if ( likely ( list_empty ( & lock - > wait_list ) ) )
atomic_set ( & lock - > count , 0 ) ;
2006-06-26 11:24:31 +04:00
spin_unlock_mutex ( & lock - > wait_lock , flags ) ;
2006-01-10 02:59:19 +03:00
return prev = = 1 ;
}
/***
* mutex_trylock - try acquire the mutex , without waiting
* @ lock : the mutex to be acquired
*
* Try to acquire the mutex atomically . Returns 1 if the mutex
* has been acquired successfully , and 0 on contention .
*
* NOTE : this function follows the spin_trylock ( ) convention , so
* it is negated to the down_trylock ( ) return values ! Be careful
* about this when converting semaphore users to mutexes .
*
* This function must not be used in interrupt context . The
* mutex must be released by the same task that acquired it .
*/
2008-02-08 15:19:53 +03:00
int __sched mutex_trylock ( struct mutex * lock )
2006-01-10 02:59:19 +03:00
{
return __mutex_fastpath_trylock ( & lock - > count ,
__mutex_trylock_slowpath ) ;
}
EXPORT_SYMBOL ( mutex_trylock ) ;