// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
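
/*
 * Note: the lock state lives in lock->cnts (constants defined in
 * include/asm-generic/qrwlock.h): roughly, the low-order bits hold the
 * writer state (_QW_LOCKED while a writer owns the lock, _QW_WAITING while
 * one is queued) and the upper bits count active readers, each adding
 * _QR_BIAS. VAL in atomic_cond_read_acquire()/_relaxed() below names the
 * freshly reloaded value tested on every spin iteration.
 */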

/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	int cnts;

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_add(_QW_WAITING, &lock->cnts);

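	/*
	 * The spin below reloads the count with relaxed ordering; the ACQUIRE
	 * ordering that the write-side critical section needs is supplied by
	 * the successful try_cmpxchg that actually takes the lock.
	 */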
	/* When no more readers or writers, set the locked flag */
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);