/*
 * bitops.c: atomic operations which got too long to be inlined all over
 *	the place.
 *
 *	Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *	Copyright 2000 Grant Grundler (grundler@cup.hp.com)
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#ifdef CONFIG_SMP
/*
 * Hashed array of spinlocks backing the emulated atomic operations on
 * SMP: each target address is hashed to one of these locks by the
 * _atomic_spin_lock_irqsave() machinery.  All entries start unlocked.
 */
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif
#ifdef CONFIG_64BIT
/*
 * __xchg64 - atomically exchange a 64-bit value.
 * @x:   new value to store
 * @ptr: location to exchange
 *
 * Returns the previous value at @ptr.  Atomicity is provided by the
 * hashed-spinlock scheme (lock chosen from __atomic_hash by @ptr),
 * with interrupts disabled while the lock is held.
 */
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
	unsigned long temp, flags;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return temp;
}
#endif
/*
 * __xchg32 - atomically exchange a 32-bit value.
 * @x:   new value to store
 * @ptr: location to exchange
 *
 * Returns the previous value at @ptr, read through a signed long
 * intermediate (XXX - is that sign extension actually wanted?) and
 * cast back to unsigned long.  Serialized by the hashed spinlock
 * selected from @ptr, with interrupts disabled.
 */
unsigned long __xchg32(int x, int *ptr)
{
	unsigned long flags;
	long old_val;

	_atomic_spin_lock_irqsave(ptr, flags);
	old_val = (long)*ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);

	return (unsigned long)old_val;
}
/*
 * __xchg8 - atomically exchange an 8-bit value.
 * @x:   new value to store
 * @ptr: location to exchange
 *
 * Returns the previous value at @ptr, read through a signed long
 * intermediate (XXX - is that sign extension actually wanted?) and
 * cast back to unsigned long.  Serialized by the hashed spinlock
 * selected from @ptr, with interrupts disabled.
 */
unsigned long __xchg8(char x, char *ptr)
{
	unsigned long flags;
	long old_val;

	_atomic_spin_lock_irqsave(ptr, flags);
	old_val = (long)*ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);

	return (unsigned long)old_val;
}
#ifdef CONFIG_64BIT
/*
 * __cmpxchg_u64 - atomic 64-bit compare-and-exchange.
 * @ptr: location to update
 * @old: expected current value
 * @new: value to store if *@ptr == @old
 *
 * Returns the value that was at @ptr before the operation; the store
 * happened iff the return value equals @old.  Atomicity comes from the
 * hashed spinlock chosen by @ptr, held with interrupts disabled.
 */
unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}
#endif
/*
 * __cmpxchg_u32 - atomic 32-bit compare-and-exchange.
 * @ptr: location to update
 * @old: expected current value
 * @new: value to store if *@ptr == @old
 *
 * Returns the value that was at @ptr before the operation (widened to
 * unsigned long); the store happened iff the return value equals @old.
 * Atomicity comes from the hashed spinlock chosen by @ptr, held with
 * interrupts disabled.
 */
unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)prev;
}