/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
2006-07-03 11:24:55 +04:00
/*
 * __mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 * @name: lock name, used by the debugging / lockdep code
 * @key:  lockdep class key
 *
 * Puts the mutex into the unlocked state: count == 1 means unlocked
 * (the locking fastpath is the 1 -> 0 transition), the wait list is
 * empty and no owner is recorded.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	/* hooks into the mutex-debug / lockdep machinery (no-ops otherwise) */
	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
2010-09-03 02:48:16 +04:00
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	/* record current as owner for adaptive spinning / debug checks */
	mutex_set_owner(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched
__mutex_unlock_slowpath(atomic_t *lock_count);
2010-09-03 02:48:16 +04:00
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);
/*
 * Lock a mutex (possibly interruptible), slowpath:
 *
 * @lock:     the mutex to acquire
 * @state:    task state to sleep in (TASK_UNINTERRUPTIBLE, TASK_KILLABLE
 *            or TASK_INTERRUPTIBLE)
 * @subclass: lockdep subclass for nested locking annotations
 * @ip:       caller instruction pointer, for lockdep/lockstat reporting
 *
 * Returns 0 on success, -EINTR if @state is interruptible and a
 * signal arrived while waiting.
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire(&lock->dep_map, subclass, 0, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */
	for (;;) {
		struct thread_info *owner;

		/*
		 * If we own the BKL, then don't spin. The owner of
		 * the mutex might be waiting on us to release the BKL.
		 */
		if (unlikely(current->lock_depth >= 0))
			break;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* try to grab the lock: 1 -> 0 transition means we got it */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	/* fast exit: xchg to -1 (waiters pending); old value 1 == was unlocked */
	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			/* back out: dequeue ourselves and report -EINTR */
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}
2006-07-03 11:24:55 +04:00
# ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * mutex_lock_nested - acquire the mutex with a lockdep subclass
 * @lock: the mutex to be acquired
 * @subclass: lockdep subclass, allows nesting of locks of the same class
 *
 * Uninterruptible; return value of the common slowpath is 0 in the
 * TASK_UNINTERRUPTIBLE case, so it is deliberately ignored here.
 */
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
2006-12-08 13:36:17 +03:00
2007-12-07 01:37:59 +03:00
/*
 * mutex_lock_killable_nested - acquire the mutex, interruptible by fatal
 * signals only, with a lockdep subclass.
 *
 * Returns 0 on success, -EINTR if a fatal signal arrived while waiting.
 */
int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
2006-12-08 13:36:17 +03:00
/*
 * mutex_lock_interruptible_nested - acquire the mutex, interruptible by
 * any signal, with a lockdep subclass.
 *
 * Returns 0 on success, -EINTR if a signal arrived while waiting.
 */
int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
2006-07-03 11:24:55 +04:00
# endif
2006-01-10 02:59:19 +03:00
/*
 * Release the lock, slowpath:
 *
 * @lock_count: pointer to the mutex' count field (the mutex is recovered
 *              via container_of)
 * @nested:     passed through to the lockdep release annotation
 *
 * Marks the lock unlocked (where the architecture fastpath left it
 * locked) and wakes up the first waiter, if any. All of this runs
 * under wait_lock.
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the later case we have to
	 * unlock it here
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
2006-07-03 11:24:33 +04:00
/*
 * Release the lock, slowpath:
 *
 * Target of the architecture unlock-fastpath failure branch; forwards to
 * the common slowpath with nested=1 for the lockdep release annotation.
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
2010-09-03 02:48:16 +04:00
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	/* only record ownership when we actually got the lock */
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_interruptible);
2008-02-08 15:19:53 +03:00
/*
 * mutex_lock_killable - acquire the mutex, interruptible by fatal signals
 * @lock: the mutex to be acquired
 *
 * Like mutex_lock_interruptible(), but only fatal (SIGKILL-class)
 * signals abort the wait. Returns 0 on success, -EINTR otherwise.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	/* only record ownership when we actually got the lock */
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);
2008-11-24 11:17:42 +03:00
/* Uninterruptible lock slowpath, invoked on fastpath contention. */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}
2008-02-08 15:19:53 +03:00
/* Killable lock slowpath; returns 0 or -EINTR on fatal signal. */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}
2008-02-08 15:19:53 +03:00
/* Interruptible lock slowpath; returns 0 or -EINTR on any signal. */
static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
2007-10-12 00:11:12 +04:00
# endif
2006-01-10 02:59:19 +03:00
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 *
 * Returns 1 if the lock was acquired, 0 otherwise. The xchg to -1
 * below is undone (back to 0) when there are no waiters, so a failed
 * trylock does not leave the lock looking contended.
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		/* prev == 1 means it was unlocked: we now own it */
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
2010-09-03 02:48:16 +04:00
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	/* only record ownership when the trylock succeeded */
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
2009-04-30 02:59:58 +04:00
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* cheap path: decrement without the lock unless we could reach 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;

	/* the counter might drop to 0 here, so serialize with the mutex */
	mutex_lock(lock);
	if (atomic_dec_and_test(cnt))
		return 1;	/* hit 0: caller now holds the lock */

	/* the decrement did not reach 0 after all; drop the lock */
	mutex_unlock(lock);
	return 0;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);