// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
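
/*
 * For illustration (assuming L1_CACHE_SHIFT == 5): an atomic64_t at address
 * 0x1234 hashes as
 *
 *	addr  = 0x1234 >> 5                 = 0x91
 *	addr ^= (0x91 >> 8) ^ (0x91 >> 16)  = 0x91
 *	index = 0x91 & (NR_LOCKS - 1)       = 1
 *
 * so it shares atomic64_lock[1].lock with every other variable that hashes
 * to the same bucket.  Dropping the low L1_CACHE_SHIFT bits maps variables
 * that share a cacheline onto the same lock, while folding in higher-order
 * bits spreads distant addresses across the 16 buckets.
 */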

s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);
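
/*
 * Even the plain read and set helpers above go through the hashed lock:
 * without native 64-bit atomic loads and stores, the two 32-bit halves of
 * v->counter could otherwise be torn by a concurrent update, letting a
 * reader observe half of an old value and half of a new one.
 */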

#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
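
/*
 * The macro expansions above generate the out-of-line helpers
 * generic_atomic64_{add,sub}(), generic_atomic64_{add,sub}_return(),
 * generic_atomic64_fetch_{add,sub}(), generic_atomic64_{and,or,xor}() and
 * generic_atomic64_fetch_{and,or,xor}().  For illustration,
 * ATOMIC64_OP(add, +=) expands to roughly:
 *
 *	void generic_atomic64_add(s64 a, atomic64_t *v)
 *	{
 *		unsigned long flags;
 *		raw_spinlock_t *lock = lock_addr(v);
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		v->counter += a;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *	}
 *	EXPORT_SYMBOL(generic_atomic64_add);
 */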

s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
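
/*
 * Note that generic_atomic64_dec_if_positive() always returns the decremented
 * value, even when the counter was already <= 0 and the store was skipped, so
 * callers can check "result >= 0" to learn whether the decrement took effect.
 */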

s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
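
/*
 * An illustrative sketch (not taken from this file) of the usual retry loop
 * built on the cmpxchg helper above:
 *
 *	s64 old, new;
 *
 *	do {
 *		old = generic_atomic64_read(v);
 *		new = old + 1;		// or any update computed from old
 *	} while (generic_atomic64_cmpxchg(v, old, new) != old);
 */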

s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);

s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
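
/*
 * generic_atomic64_fetch_add_unless() returns the value the counter held
 * before the call and only adds @a when that value differs from @u, so the
 * caller can tell whether the addition happened.  For example (a sketch of
 * the usual "increment unless zero" idiom):
 *
 *	// take a reference only while the refcount is still non-zero
 *	bool got_ref = generic_atomic64_fetch_add_unless(v, 1, 0) != 0;
 */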