// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>

/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
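	/*
	 * Undo the reader count increment taken in the fastpath before
	 * joining the queue; it is re-added under the wait_lock below.
	 */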
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
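	/* This reader now heads the queue; restore its reader count. */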
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
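
/*
 * For context only: a rough sketch of how the read fastpath is expected to
 * reach the slowpath above. The real fastpath lives in
 * include/asm-generic/qrwlock.h and may differ in detail; this is an
 * illustration, not the authoritative definition.
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		int cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;
 *
 *		queued_read_lock_slowpath(lock);
 *	}
 */
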
/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_add(_QW_WAITING, &lock->cnts);
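
	/*
	 * From here on, readers entering the fastpath see the writer mask
	 * set and divert to the slowpath; outside interrupt context they
	 * queue behind this writer on the wait_lock.
	 */
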
	/* When no more readers or writers, set the locked flag */
	do {
		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
					_QW_LOCKED) != _QW_WAITING);
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
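
/*
 * As with the read side, a rough sketch of the expected write fastpath (the
 * real one is in include/asm-generic/qrwlock.h and may use a different
 * cmpxchg flavour; illustrative only):
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 *			return;
 *
 *		queued_write_lock_slowpath(lock);
 *	}
 */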