/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to prevent the lockref code from
 *      stealing the lock and changing things underneath it. This also
 *      allows some optimizations to be applied without conflicting with
 *      lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
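
/*
 * Illustrative sketch (caller-side, not defined in this header): the lockref
 * fast path is the main user of the by-value check above. Roughly, and
 * assuming the lockref layout used by lib/lockref.c, its cmpxchg loop only
 * proceeds while the embedded lock reads as unlocked:
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
 *		... speculatively update the reference count with a
 *		    64-bit cmpxchg, never touching the lock itself ...
 *	}
 */
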
/**
* queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	/* Contended if any bits outside the locked byte (pending/tail) are set. */
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
/**
* queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
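
/*
 * Illustrative usage sketch (caller-side, not defined in this header): a
 * trylock user must be prepared for failure and take another path when the
 * lock is busy, e.g.
 *
 *	if (queued_spin_trylock(lock)) {
 *		... short critical section ...
 *		queued_spin_unlock(lock);
 *	} else {
 *		... contended: defer the work or retry later ...
 *	}
 */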
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
* queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	/* Fast path: cmpxchg the lock word from 0 (unlocked) to _Q_LOCKED_VAL. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/*
	 * Contended: the failed cmpxchg left the observed lock word in @val,
	 * which is handed to the slow path.
	 */
	queued_spin_lock_slowpath(lock, val);
}
#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
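
/*
 * Illustrative sketch (caller-side, not defined in this header): lock and
 * unlock pair to form a critical section; the acquire in queued_spin_lock()
 * pairs with the release in queued_spin_unlock() so that everything done
 * before the unlock is visible to the next lock holder:
 *
 *	queued_spin_lock(lock);
 *	... critical section ...
 *	queued_spin_unlock(lock);
 */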

#ifndef virt_spin_lock
/*
 * By default virt_spin_lock() does nothing and returns false, so callers
 * fall through to the normal queued spinlock path. Virtualized
 * architectures may override it to take the lock by other means (e.g. a
 * simple test-and-set) and return true instead.
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
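
/*
 * Illustrative sketch (assumes the arch_spinlock_t typedef and the
 * __ARCH_SPIN_LOCK_UNLOCKED initializer from qspinlock_types.h): with the
 * remapping above, generic arch_spin_* callers end up in the queued
 * spinlock implementation, e.g.
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section ...
 *	arch_spin_unlock(&lock);
 */
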
#endif /* __ASM_GENERIC_QSPINLOCK_H */