/*
 * include/asm-generic/mutex-dec.h
*
 * Generic implementation of the mutex fastpath, based on atomic
 * decrement/increment.
*/
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H
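
/*
 * For example, an architecture with cheap atomic decrement/increment
 * primitives can typically adopt this generic fastpath by having its
 * asm/mutex.h do nothing more than:
 *
 *        #include <asm-generic/mutex-dec.h>
 */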
/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
*/
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))
                fail_fn(count);
}
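
/*
 * The core mutex code is expected to wire this fastpath up with the slowpath
 * as the fail function.  A minimal sketch of such a caller, assuming the
 * usual atomic_t ->count field in struct mutex and a __mutex_lock_slowpath()
 * helper:
 *
 *        void mutex_lock(struct mutex *lock)
 *        {
 *                __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *        }
 */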
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
*
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
        if (unlikely(atomic_dec_return(count) < 0))
                return -1;
        return 0;
}
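
/*
 * The retval variant is for callers that want to pick their own slowpath
 * instead of passing a fail function, e.g. an interruptible lock.  A minimal
 * sketch of such a caller (the slowpath name is illustrative):
 *
 *        int mutex_lock_interruptible(struct mutex *lock)
 *        {
 *                if (__mutex_fastpath_lock_retval(&lock->count))
 *                        return __mutex_lock_interruptible_slowpath(lock);
 *                return 0;
 *        }
 */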
/**
 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, then the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
*/
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_inc_return(count) <= 0))
                fail_fn(count);
}
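
/*
 * On contention the atomic_inc_return() above can leave the count at a value
 * lower than 1 instead of setting it to 1, so by the rule stated in the
 * comment above, this implementation has to report 1 here.
 */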
#define __mutex_slowpath_needs_to_unlock()		1
/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it at 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
        return 0;
}
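
/*
 * As noted above, an architecture without an effective trylock primitive
 * could instead defer to the spinlock-based <fail_fn> unconditionally; the
 * body of that alternative would reduce to something like:
 *
 *        return fail_fn(count);
 */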
#endif