/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
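
/*
 * For illustration (an assumed 64-byte cacheline, L1_CACHE_SHIFT == 6,
 * and made-up addresses): variables at 0x1040 and 0x1048 both become
 * addr == 0x41 after the shift, hash to the same value, and share
 * lock 1; a variable at 0x1080 becomes addr == 0x42 and takes lock 2.
 * atomic64_t variables in the same cacheline thus always share a
 * lock, while distinct cachelines are spread across the 16 locks.
 */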

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
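
/*
 * Example usage (a hypothetical caller, not part of this file): a
 * 64-bit statistics counter that reads and writes consistently even
 * though the CPU cannot load or store 64 bits atomically:
 *
 *	static atomic64_t bytes_seen = ATOMIC64_INIT(0);
 *
 *	atomic64_set(&bytes_seen, 0);
 *	...
 *	long long total = atomic64_read(&bytes_seen);
 */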

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_fetch_##op);
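
/*
 * For illustration, ATOMIC64_FETCH_OP(add, +=) expands to roughly:
 *
 *	long long atomic64_fetch_add(long long a, atomic64_t *v)
 *	{
 *		...take the hashed lock...
 *		val = v->counter;	// remember the old value
 *		v->counter += a;	// then apply the operation
 *		...drop the lock...
 *		return val;
 *	}
 *
 * i.e. the fetch_ variants return the value the counter held before
 * the operation, while the _return variants return the value after.
 */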

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
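
/*
 * The expansions above provide, for each of add, sub, and, or and
 * xor: atomic64_<op>() (no return value), atomic64_<op>_return()
 * (returns the new value) and atomic64_fetch_<op>() (returns the old
 * value), all serialized by the hashed spinlocks.
 */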

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
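
/*
 * Example usage (a hypothetical caller): drop a reference only while
 * the count is still positive; a negative return value means the
 * counter was already at zero (or below) and was left untouched:
 *
 *	if (atomic64_dec_if_positive(&obj->refs) < 0)
 *		pr_warn("refcount already zero, not decremented\n");
 */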

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
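
/*
 * Example usage (a hypothetical caller): the classic compare-and-swap
 * loop, here doubling a counter; the update is retried if another CPU
 * changed the value between the read and the cmpxchg:
 *
 *	static atomic64_t v;
 *	long long old, new;
 *
 *	do {
 *		old = atomic64_read(&v);
 *		new = old * 2;
 *	} while (atomic64_cmpxchg(&v, old, new) != old);
 */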

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_fetch_add_unless);
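
/*
 * Example usage (a hypothetical caller): take a reference only if the
 * object is still live, i.e. add 1 unless the count has already
 * dropped to zero; the returned old value tells the caller whether
 * the add actually happened:
 *
 *	if (atomic64_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// already dead, add was skipped
 */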