/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
*/
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
/* lock value of the local cpu (see arch_spin_lockval() below) */
#define SPINLOCK_LOCKVAL	(S390_lowcore.spinlock_lockval)

extern int spin_retry;
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted
/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
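
/*
 * Illustrative sketch (the lock name below is hypothetical): callers
 * normally go through the generic spin_lock()/spin_unlock() wrappers,
 * which end up in the arch_spin_*() helpers in this file.  The lock word
 * is 0 when free and holds SPINLOCK_LOCKVAL (the owner's cpu value, see
 * arch_spin_lockval()) while taken.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);		arch_spin_lock() underneath
 *	...critical section...
 *	spin_unlock(&my_lock);		arch_spin_unlock() underneath
 */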
void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0070\n"	/* NIAI 7 (next-instruction access-intent hint) */
#endif
		"	st	%1,%0\n"
		: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
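
/*
 * Illustrative note (example names are hypothetical): rw->lock is a
 * signed int - 0 means unlocked, a positive value counts the active
 * readers, and the msb (0x80000000) marks a writer.  That is why
 * arch_read_can_lock() tests for >= 0 and arch_write_can_lock() for
 * == 0.  Typical callers use the generic wrappers:
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	read_lock(&my_rwlock);		readers may share the lock
 *	read_unlock(&my_rwlock);
 *
 *	write_lock(&my_rwlock);		writers get it exclusively
 *	write_unlock(&my_rwlock);
 */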
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"
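
/*
 * Background note: "lao", "lan" and "laa" are the z196 interlocked-access
 * load-and-or/and/add instructions; each updates the lock word atomically
 * and returns its old value.  __RAW_LOCK below follows the update with
 * "bcr 14,0" (fast-BCR serialization) on the acquire path, while
 * __RAW_UNLOCK issues the interlocked update alone.
 */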
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}
static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */