/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <asm/system.h>

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
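
/*
 * Illustrative sketch (not part of this header): any other
 * read-modify-write on v->counter must follow the same ldrex/strex
 * retry pattern described above.  A hypothetical atomic_or(), for
 * example, would look like:
 *
 *	static inline void atomic_or(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		__asm__ __volatile__("@ atomic_or\n"
 *	"1:	ldrex	%0, [%2]\n"
 *	"	orr	%0, %0, %3\n"
 *	"	strex	%1, %0, [%2]\n"
 *	"	teq	%1, #0\n"
 *	"	bne	1b"
 *		: "=&r" (result), "=&r" (tmp)
 *		: "r" (&v->counter), "Ir" (i)
 *		: "cc");
 *	}
 */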

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
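
/*
 * Illustrative sketch (not part of this header): callers usually wrap
 * atomic_cmpxchg() in a read/compute/compare-and-swap loop.  For
 * example, a hypothetical "increment unless at a ceiling" helper:
 *
 *	static inline int atomic_inc_below(atomic_t *v, int ceiling)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old >= ceiling)
 *				return 0;
 *			new = old + 1;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return 1;
 *	}
 */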

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}
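
/*
 * Illustrative sketch (not part of this header): on these pre-ARMv6,
 * UP-only configurations any further atomic op is just the plain C
 * operation bracketed by raw_local_irq_save()/raw_local_irq_restore(),
 * e.g. a hypothetical atomic_or():
 *
 *	static inline void atomic_or(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		raw_local_irq_save(flags);
 *		v->counter |= i;
 *		raw_local_irq_restore(flags);
 *	}
 */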

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
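
/*
 * Illustrative sketch (not part of this header): atomic_inc_not_zero()
 * is the usual building block for "take a reference only if the object
 * is still live", e.g. with a hypothetical struct foo holding an
 * atomic_t refcount:
 *
 *	static inline struct foo *foo_get(struct foo *f)
 *	{
 *		return atomic_inc_not_zero(&f->refcount) ? f : NULL;
 *	}
 */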

#define atomic_add(i, v)	(void) atomic_add_return(i, v)
#define atomic_inc(v)		(void) atomic_add_return(1, v)
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
#define atomic_dec(v)		(void) atomic_sub_return(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
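
/*
 * Illustrative sketch (not part of this header): the classic use of
 * atomic_dec_and_test() is reference-count release on a hypothetical
 * object (kfree() comes from <linux/slab.h>):
 *
 *	static inline void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			kfree(f);
 *	}
 */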

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
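
/*
 * Illustrative sketch (not part of this header, field names hypothetical):
 * these barriers pair with the atomic op when surrounding accesses must
 * be ordered against it, e.g.
 *
 *	obj->status = DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */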

#include <asm-generic/atomic.h>
#endif
#endif