// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <trace/events/lock.h>
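
/*
 * Layout reminder (per include/asm-generic/qrwlock.h; treat the details
 * as version-dependent): lock->cnts packs the reader count in its upper
 * bits above the writer-state bits, whose values distinguish a writer
 * that holds the lock (_QW_LOCKED) from one that is queued and waiting
 * (_QW_WAITING); lock->wait_lock serializes the queue of waiters.
 */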

/**
 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
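	/*
	 * The fastpath speculatively added _QR_BIAS to the reader count
	 * before diverting here; back that increment out so this reader
	 * can queue up fairly rather than hold a stale count while it
	 * waits behind the writer.
	 */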
	atomic_sub(_QR_BIAS, &lock->cnts);

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
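
/*
 * For context, a sketch of the fastpath that funnels readers here. It is
 * modelled on queued_read_lock() from include/asm-generic/qrwlock.h, so
 * treat the exact body as an assumption that may differ across versions:
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		int cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;
 *
 *		// A writer holds or awaits the lock: take the slowpath,
 *		// which backs out the speculative _QR_BIAS if it queues.
 *		queued_read_lock_slowpath(lock);
 *	}
 */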

/**
 * queued_write_lock_slowpath - acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)
{
	int cnts;

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_WRITE);

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!(cnts = atomic_read(&lock->cnts)) &&
	    atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_or(_QW_WAITING, &lock->cnts);
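
	/*
	 * Note that _QW_WAITING only holds off readers on the fastpath;
	 * readers arriving in interrupt context spin solely on _QW_LOCKED
	 * above, so they can still overtake a queued writer. That bounded
	 * unfairness avoids deadlock when an interrupt re-takes the lock
	 * for read on a CPU already holding it for read.
	 */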

	/* When no more readers or writers, set the locked flag */
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
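
/*
 * And the corresponding write fastpath, again modelled on
 * queued_write_lock() from include/asm-generic/qrwlock.h (an assumed,
 * version-dependent sketch rather than the authoritative definition):
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		int cnts = 0;
 *
 *		// Acquire in one shot only when the lock word is idle.
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
 *						      _QW_LOCKED)))
 *			return;
 *
 *		queued_write_lock_slowpath(lock);
 *	}
 */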