/*
 * Queue read/write lock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/*
 * Writer states & reader shift and bias.
 */
#define _QW_WAITING	0x100		/* A writer is waiting	   */
#define _QW_LOCKED	0x0ff		/* A writer holds the lock */
#define _QW_WMASK	0x1ff		/* Writer mask		   */
#define _QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)
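
/*
 * A rough sketch of how these definitions carve up the 32-bit 'cnts'
 * word (assuming the layout in asm-generic/qrwlock_types.h, where the
 * 'wlocked' byte aliases the low 8 bits of 'cnts'):
 *
 *	 31           9 8 7           0
 *	+--------------+-+-------------+
 *	| reader count |W|   wlocked   |
 *	+--------------+-+-------------+
 *
 * Bit 8 (_QW_WAITING) marks a waiting writer, the low byte holds
 * _QW_LOCKED while a writer owns the lock, and each reader adds
 * _QR_BIAS (1 << 9) so that readers never disturb the writer bits.
 */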
/*
* External function declarations
*/
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}
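
/*
 * A minimal usage sketch (hypothetical caller, not part of this API):
 * the speculative _QR_BIAS add above may transiently raise the reader
 * count even while a writer holds the lock, which is why the failure
 * path undoes it with atomic_sub() before returning 0.
 *
 *	if (queued_read_trylock(&my_lock)) {
 *		// ... read-side critical section ...
 *		queued_read_unlock(&my_lock);
 *	} else {
 *		// writer active or waiting; back off
 *	}
 */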
/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}
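
/*
 * The write trylock only succeeds when 'cnts' is 0, i.e. there is
 * neither a writer nor any reader. On failure,
 * atomic_try_cmpxchg_acquire() also writes the observed value back into
 * 'cnts', though this caller simply discards it. A usage sketch (lock
 * name is illustrative only):
 *
 *	if (queued_write_trylock(&my_lock)) {
 *		// ... exclusive critical section ...
 *		queued_write_unlock(&my_lock);
 *	}
 */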
/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}
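
/*
 * Note that the fastpath above has already added _QR_BIAS before it
 * discovers a writer; hence the "if necessary" in the comment: the
 * (separately implemented) slowpath either keeps that bias and spins,
 * or drops it and joins the queue. A typical read-side pairing
 * (illustrative only):
 *
 *	queued_read_lock(&my_lock);
 *	// ... read-side critical section ...
 *	queued_read_unlock(&my_lock);
 */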
/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	u32 cnts = 0;

	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}
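
/*
 * The writer fastpath is a single 0 -> _QW_LOCKED cmpxchg, so it can
 * only succeed on an entirely idle lock; any contention (readers
 * present, or another writer holding or waiting) diverts to the
 * slowpath. Illustrative pairing:
 *
 *	queued_write_lock(&my_lock);
 *	// ... exclusive critical section ...
 *	queued_write_unlock(&my_lock);
 */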
/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
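
/*
 * The release semantics of atomic_sub_return_release() pair with the
 * acquire in the lock paths above, so accesses made inside the
 * read-side critical section cannot leak past the unlock.
 */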
/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release(&lock->wlocked, 0);
}
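
/*
 * Since 'wlocked' aliases the low byte of 'cnts' (per the union in
 * asm-generic/qrwlock_types.h), storing 0 here clears _QW_LOCKED
 * without touching the reader count or the _QW_WAITING bit;
 * smp_store_release() supplies the release ordering for the write-side
 * critical section.
 */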
/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)	queued_read_lock(l)
#define arch_write_lock(l)	queued_write_lock(l)
#define arch_read_trylock(l)	queued_read_trylock(l)
#define arch_write_trylock(l)	queued_write_trylock(l)
#define arch_read_unlock(l)	queued_read_unlock(l)
#define arch_write_unlock(l)	queued_write_unlock(l)

#endif	/* __ASM_GENERIC_QRWLOCK_H */