/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache-line bouncing that common test-and-set
 * spin-lock implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		cpu_relax();						\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure that all
 * operations in the critical section have completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: the smp_load_acquire/smp_store_release pair is not
 * sufficient to form a full memory barrier across
 * cpus for many architectures (except x86) for mcs_unlock and mcs_lock.
 * For applications that need a full barrier across multiple cpus
 * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be
 * used after mcs_lock.
 */

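/*
 * Illustrative sketch (an addition for clarity, not part of the original
 * header; it assumes smp_mb__after_unlock_lock() is available from the
 * generic barrier/spinlock headers): a caller that needs the unlock+lock
 * pair to act as a full barrier across cpus would issue the extra barrier
 * right after taking the lock, roughly:
 *
 *	mcs_spin_lock(&lock, &node);
 *	smp_mb__after_unlock_lock();	-- promote the ACQUIRE to a full barrier
 *	... critical section ...
 *	mcs_spin_unlock(&lock, &node);
 */
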
/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to that node to this function, in addition to the lock.
 * If the lock has already been acquired, then this will proceed to spin
 * on this node->locked until the previous lock holder sets node->locked
 * in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/*
	 * We rely on the full barrier with global transitivity implied by the
	 * below xchg() to order the initialization stores above against any
	 * observation of @node. And to provide the ACQUIRE ordering associated
	 * with a LOCK primitive.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired, don't need to set node->locked to 1. A thread
		 * only spins on its own node->locked value for lock acquisition.
		 * However, since this thread can immediately acquire the lock
		 * and does not proceed to spin on its own node->locked, this
		 * value won't be used. If a debug mode is needed to
		 * audit lock status, then set node->locked value here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}

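/*
 * Illustrative usage sketch (an addition for clarity, not part of the
 * original header): the caller keeps one mcs_spinlock node per acquisition,
 * typically on its own stack, and hands the same node to both the lock and
 * unlock routines. The function and parameter names below are hypothetical.
 */
static inline void mcs_spin_lock_usage_example(struct mcs_spinlock **example_lock)
{
	struct mcs_spinlock node;	/* per-acquisition queue node, local to this cpu */

	mcs_spin_lock(example_lock, &node);
	/* ... critical section protected by example_lock ... */
	mcs_spin_unlock(example_lock, &node);
}
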
#endif /* __LINUX_MCS_SPINLOCK_H */