/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock: Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif
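
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * the typical use of queued_spin_unlock_wait() is a path that must not
 * proceed while a current holder is still inside its critical section,
 * but has no need to take the lock itself.
 */
#if 0
static void example_wait_for_holder(struct qspinlock *lock)
{
	/* returns once any current lock holder has dropped the lock */
	queued_spin_unlock_wait(lock);
	/* ... proceed, mindful of the live-lock caveat documented above ... */
}
#endif
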
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * See queued_spin_unlock_wait().
	 *
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to avoid the lockref code stealing the
 *      lock and changing things underneath it. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	    (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() in order to guarantee release semantics
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif
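
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * how the primitives above compose. Real kernel code reaches them through
 * the arch_spin_*() defines below rather than calling them directly.
 */
#if 0
static void example_locked_increment(struct qspinlock *lock, int *counter)
{
	if (!queued_spin_trylock(lock))	/* uncontended cmpxchg fast path */
		queued_spin_lock(lock);	/* may fall back to the slowpath */
	(*counter)++;			/* critical section */
	queued_spin_unlock(lock);	/* drop _Q_LOCKED_VAL with release semantics */
}
#endif
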
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)
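
/*
 * Illustrative note (a sketch, not taken from any particular architecture):
 * an architecture opting in to queued spinlocks normally defines overrides
 * for the #ifndef'd hooks above (e.g. queued_spin_unlock(), virt_spin_lock())
 * in its own asm/spinlock.h and then includes this generic header, roughly:
 *
 *	#define queued_spin_unlock queued_spin_unlock
 *	static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		smp_store_release((u8 *)&lock->val, 0);
 *	}
 *
 *	#include <asm-generic/qspinlock.h>
 *
 * The byte store above assumes the locked byte is the low byte of lock->val,
 * which is an endianness/layout-specific assumption.
 */
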
#endif /* __ASM_GENERIC_QSPINLOCK_H */