/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>
/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16
/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};
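/*
 * Hash the atomic64_t's address down to one of the NR_LOCKS locks:
 * discard the low cacheline-offset bits, then XOR-fold the higher
 * bits so that variables scattered across memory spread over the
 * whole lock array.
 */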
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);
void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)
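
/*
 * Each ATOMIC64_OPS(op, c_op) invocation below expands to a pair of
 * exported functions, e.g. ATOMIC64_OPS(add, +=) generates
 * atomic64_add() and atomic64_add_return(); the bitwise ops only
 * need the non-returning ATOMIC64_OP variant.
 */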
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
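
/*
 * Minimal usage sketch (illustrative only, not part of this file):
 * a hypothetical caller keeping a 64-bit byte counter on a machine
 * without native 64-bit atomics.  Every access goes through the
 * hashed locks above, so concurrent readers and writers stay coherent.
 *
 *	static atomic64_t bytes_seen = ATOMIC64_INIT(0);
 *
 *	static void account_packet(unsigned int len)
 *	{
 *		atomic64_add(len, &bytes_seen);
 *	}
 *
 *	static long long total_bytes(void)
 *	{
 *		return atomic64_read(&bytes_seen);
 *	}
 */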