/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */

/*
 * Writer states & reader shift and bias.
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)
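
/*
 * Layout of the ->cnts word implied by the definitions above (an added
 * illustration, not part of the original comments):
 *
 *	bits 0-7  : writer-locked byte (_QW_LOCKED)
 *	bit  8    : writer-waiting flag (_QW_WAITING)
 *	bits 9-31 : reader count, in units of _QR_BIAS
 *
 * For example, cnts == 0x400 means two readers hold the lock and no writer
 * is present, while cnts == 0x0ff means a writer holds the lock.
 */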

/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}
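
/*
 * Illustrative caller-side sketch (added commentary; callers normally use
 * the generic read_trylock() wrapper rather than calling this directly):
 *
 *	if (queued_read_trylock(&lock)) {
 *		... read-side critical section ...
 *		queued_read_unlock(&lock);
 *	}
 *
 * On failure the speculative _QR_BIAS increment has already been undone by
 * the atomic_sub() above, so the caller simply does not hold the lock.
 */
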
/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}
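
/*
 * Added note: the try_cmpxchg above succeeds only if ->cnts was still 0,
 * i.e. no reader holds the lock and no writer holds it or has set
 * _QW_WAITING; otherwise the trylock fails without spinning.
 */
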
/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}
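
/*
 * Added note: the fast path above is the same cmpxchg as
 * queued_write_trylock(); if it fails, queued_write_lock_slowpath()
 * (implemented in kernel/locking/qrwlock.c) queues the writer on
 * ->wait_lock until the lock can be taken.
 */
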
/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release(&lock->wlocked, 0);
}
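
/*
 * Added note: only the low ->wlocked byte is cleared, with release
 * semantics; the writer-waiting bit and any reader bias already added by
 * slowpath waiters live in the other bits of ->cnts and are left intact.
 */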

/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
	return arch_spin_is_locked(&lock->wait_lock);
}
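
/*
 * Added note: contention is observed via ->wait_lock, the internal spinlock
 * that slowpath waiters queue on, which is why arch_spin_is_locked() must be
 * defined before this header is included (see the comment near the top).
 */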

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)
#define arch_rwlock_is_contended(l)	queued_rwlock_is_contended(l)
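
/*
 * Illustrative use (a sketch, assuming the generic rwlock wrappers in
 * <linux/rwlock.h> are layered on the arch_* macros above; "example_lock"
 * is a hypothetical name used only for this illustration):
 *
 *	static DEFINE_RWLOCK(example_lock);
 *
 *	read_lock(&example_lock);	-> arch_read_lock()  -> queued_read_lock()
 *	... read-side critical section ...
 *	read_unlock(&example_lock);	-> arch_read_unlock() -> queued_read_unlock()
 */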

#endif /* __ASM_GENERIC_QRWLOCK_H */