/*
 * Queued read/write locks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/qrwlock.h>

/*
 * This internal data structure is used for optimizing access to some of
 * the subfields within the atomic_t cnts.
 */
struct __qrwlock {
        union {
                atomic_t cnts;
                struct {
#ifdef __LITTLE_ENDIAN
                        u8 wmode;       /* Writer mode   */
                        u8 rcnts[3];    /* Reader counts */
#else
                        u8 rcnts[3];    /* Reader counts */
                        u8 wmode;       /* Writer mode   */
#endif
                };
        };
        arch_spinlock_t lock;
};
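
/*
 * Note (illustration only, not part of the algorithm below): the anonymous
 * union lets a writer manipulate just its mode byte while readers keep
 * operating on the whole atomic_t, roughly:
 *
 *      struct __qrwlock *l = (struct __qrwlock *)lock;
 *
 *      cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING);
 *
 * This is the byte-level cmpxchg that queued_write_lock_slowpath() uses to
 * set _QW_WAITING without disturbing the reader counts.
 */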

/**
 * rspin_until_writer_unlock - spin until the writer releases the lock
 * @lock : Pointer to queue rwlock structure
 * @cnts : Current queue rwlock lock value
 *
 * In interrupt context or at the head of the queue, the reader will just
 * increment the reader count & wait until the writer releases the lock.
 */
static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
        while ((cnts & _QW_WMASK) == _QW_LOCKED) {
                cpu_relax();
                cnts = atomic_read_acquire(&lock->cnts);
        }
}

/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * @cnts : Current qrwlock lock value
 */
void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
{
        /*
         * Readers come here when they cannot get the lock without waiting
         */
        if (unlikely(in_interrupt())) {
                /*
                 * Readers in interrupt context will get the lock immediately
                 * if the writer is just waiting (not holding the lock yet).
                 * The rspin_until_writer_unlock() function returns immediately
                 * in this case. Otherwise, they will spin (with ACQUIRE
                 * semantics) until the lock is available without waiting in
                 * the queue.
                 */
                rspin_until_writer_unlock(lock, cnts);
                return;
        }
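        /*
         * Undo the reader count taken speculatively in the fast path; it is
         * re-added (with ACQUIRE semantics) once this reader has reached the
         * head of the wait queue below.
         */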
        atomic_sub(_QR_BIAS, &lock->cnts);

        /*
         * Put the reader into the wait queue
         */
        arch_spin_lock(&lock->wait_lock);

        /*
         * The ACQUIRE semantics of the following spinning code ensure
         * that accesses can't leak upwards out of our subsequent critical
         * section in the case that the lock is currently held for write.
         */
        cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
        rspin_until_writer_unlock(lock, cnts);

        /*
         * Signal the next one in queue to become queue head
         */
        arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
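
/*
 * For reference: the slowpath above is only entered from the read-lock fast
 * path in the qrwlock header (<asm/qrwlock.h>), which roughly (details may
 * differ between kernel versions) does:
 *
 *      static inline void queued_read_lock(struct qrwlock *lock)
 *      {
 *              u32 cnts;
 *
 *              cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *              if (likely(!(cnts & _QW_WMASK)))
 *                      return;
 *
 *              queued_read_lock_slowpath(lock, cnts);
 *      }
 */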

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
        u32 cnts;

        /* Put the writer into the wait queue */
        arch_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock directly if no reader is present */
        if (!atomic_read(&lock->cnts) &&
            (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
                goto unlock;

        /*
         * Set the waiting flag to notify readers that a writer is pending,
         * or wait for a previous writer to go away.
         */
        for (;;) {
                struct __qrwlock *l = (struct __qrwlock *)lock;

                if (!READ_ONCE(l->wmode) &&
                    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
                        break;

                cpu_relax();
        }

        /* When no more readers, set the locked flag */
        for (;;) {
                cnts = atomic_read(&lock->cnts);
                if ((cnts == _QW_WAITING) &&
                    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
                                            _QW_LOCKED) == _QW_WAITING))
                        break;

                cpu_relax();
        }
unlock:
        arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
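
/*
 * For reference: this slowpath is entered from the write-lock fast path in
 * the qrwlock header, which roughly (details may differ between kernel
 * versions) attempts a single cmpxchg from the fully unlocked state:
 *
 *      static inline void queued_write_lock(struct qrwlock *lock)
 *      {
 *              if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 *                      return;
 *
 *              queued_write_lock_slowpath(lock);
 *      }
 */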