/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>
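
/*
 * Emulate atomic operations with a small, address-hashed array of
 * spinlocks: ATOMIC_HASH() picks one of ATOMIC_HASH_SIZE locks from
 * bits of the object's address, so unrelated atomics rarely contend
 * on the same lock.  On UP a single dummy lock suffices, since
 * disabling interrupts already makes the critical sections atomic.
 */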
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		(&dummy)

#endif /* SMP */
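
/*
 * Add i to v->counter under the lock its address hashes to and
 * return the new value.
 */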
int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(ATOMIC_HASH(v), flags);

	ret = (v->counter += i);

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_return);
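
/*
 * Compare-and-swap: store new in v->counter only if it still holds
 * old.  Returns the value that was read, so the caller can tell
 * whether the exchange happened.
 */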
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);
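
/*
 * Add a to v->counter unless it currently equals u.  Returns the old
 * value, so callers can compare against u to see whether the add
 * happened.
 */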
int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);

/*
 * Atomic operations are already serializing: a plain store just has
 * to take the same hashed lock so it cannot race the read-modify-write
 * helpers above.
 */
void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);
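
/*
 * Test-and-set-bit: OR mask into *addr and return the previous state
 * of the masked bits (non-zero iff any of them were already set).
 */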
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);
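
/*
 * Test-and-clear-bit: clear the masked bits in *addr and return their
 * previous state.
 */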
unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);
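
/*
 * Test-and-change-bit: flip the masked bits in *addr and return their
 * previous state.
 */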
unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);
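
/*
 * cmpxchg on a bare u32: store new in *ptr only if it still holds old,
 * using the same address-hashed locks.  Returns the previous value.
 */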
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);