#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <linux/irqflags.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH(a) (&(__atomic_hash[(((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1)]))
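
/*
 * Illustrative sketch, not part of this header: assuming a hypothetical
 * L1_CACHE_BYTES of 32 (ATOMIC_HASH_SIZE is 4 here), words in the same
 * cacheline hash to the same lock slot, while neighbouring cachelines
 * usually do not:
 *
 *	ATOMIC_HASH((void *)0x1000)	-> ((0x1000/32) & 3) == slot 0
 *	ATOMIC_HASH((void *)0x1008)	-> same cacheline, slot 0
 *	ATOMIC_HASH((void *)0x1020)	-> next cacheline, slot 1
 */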

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while (0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while (0)

#else

#define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)

#endif
/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
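
/*
 * Illustrative usage sketch, not part of this header (the bitmap below is a
 * hypothetical caller's): on a 64-bit build, bit 70 of a 128-bit map lives
 * in the second word and is set atomically, since BIT_WORD(70) == 1 and
 * BIT_MASK(70) == 1UL << 6:
 *
 *	DECLARE_BITMAP(map, 128);
 *	set_bit(70, map);
 */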
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
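
/*
 * Illustrative sketch, not part of this header (the structure and bit name
 * below are hypothetical): when clear_bit() releases a lock-like flag, pair
 * it with smp_mb__before_atomic() so that earlier stores are visible to
 * other CPUs before they can observe the flag as clear:
 *
 *	dev->result = res;
 *	smp_mb__before_atomic();
 *	clear_bit(MY_DEV_BUSY, &dev->flags);
 */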
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; it may be
 * reordered on other architectures.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on other architectures.  It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
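
/*
 * Illustrative sketch, not part of this header (the names below are
 * hypothetical): the returned old value makes a simple try-lock, since
 * only the caller that saw the bit as 0 proceeds:
 *
 *	if (test_and_set_bit(MY_DEV_BUSY, &dev->flags))
 *		return -EBUSY;
 *	do_exclusive_work(dev);
 *	clear_bit(MY_DEV_BUSY, &dev->flags);
 */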
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on other architectures.  It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
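
/*
 * Illustrative sketch, not part of this header (the names below are
 * hypothetical): because the test and the clear happen under one lock
 * acquisition, a pending flag is consumed exactly once even if several
 * CPUs race to handle it:
 *
 *	if (test_and_clear_bit(MY_WORK_PENDING, &dev->flags))
 *		do_pending_work(dev);
 */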
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */