/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16
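
/*
 * For a rough sense of scale (simple arithmetic, not a guarantee): each
 * lock slot below is padded out to one L1 cache line, so with a common
 * 64-byte line size the whole array takes 16 * 64 = 1024 bytes; the
 * exact figure depends on L1_CACHE_BYTES for the architecture.
 */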

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
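
/*
 * Pick the lock that guards a given atomic64_t: hash the variable's
 * address by discarding the offset-within-cacheline bits and folding in
 * some higher-order bits, then index into the NR_LOCKS-entry array.
 * Different variables may share a lock; that only costs a little
 * contention, never correctness.
 */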
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
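
/*
 * Every operation below follows the same pattern: take the per-variable
 * spinlock with interrupts disabled, act on v->counter, then release the
 * lock.  The lock is what makes each 64-bit access appear atomic on CPUs
 * that can only load or store 32 bits at a time.
 */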
long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);
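
/*
 * Like atomic64_add(), but also returns the updated value.  The generic
 * atomic64 header is expected to build helpers such as
 * atomic64_inc_return() on top of this (an assumption about those
 * wrappers; they are not defined in this file).
 */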
long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);

void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);

long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_sub_return);
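
/*
 * Decrement only if the result would stay non-negative.  The return value
 * is always (old value - 1), so a negative return tells the caller that
 * no decrement was performed.
 */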
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
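
/*
 * Compare-and-exchange: store @n only if the counter still equals @o, and
 * return the value that was found.  Callers can build other atomic
 * updates out of this with a retry loop, e.g. (illustrative sketch only;
 * "old" and "new" are the caller's local variables):
 *
 *	do {
 *		old = atomic64_read(v);
 *		new = old * 2;
 *	} while (atomic64_cmpxchg(v, old, new) != old);
 */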
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
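
/*
 * Unconditionally replace the counter with @new and return the previous
 * value.
 */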
long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);
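
/*
 * Add @a to the counter unless it currently equals @u; return 1 if the
 * addition was done, 0 otherwise.  The classic use is the inc-not-zero
 * idiom for reference counts, e.g. atomic64_add_unless(&ref, 1, 0)
 * (illustrative; "ref" is just a stand-in name).
 */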
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
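
/*
 * Initialise the lock array.  pure_initcall() runs at the earliest of the
 * normal initcall levels (level 0), so the locks are ready before the
 * bulk of kernel initialisation starts using 64-bit atomics.
 */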
static int init_atomic64_lock(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; ++i)
		raw_spin_lock_init(&atomic64_lock[i].lock);
	return 0;
}

pure_initcall(init_atomic64_lock);
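
/*
 * Illustrative usage from a caller's point of view (hypothetical names,
 * not part of this file): a 64-bit statistics counter that stays correct
 * on 32-bit machines.
 *
 *	static atomic64_t bytes_rx = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_rx);
 *	if (atomic64_read(&bytes_rx) > limit)
 *		atomic64_set(&bytes_rx, 0);
 */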