/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
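
/*
 * Illustrative usage only (an assumed example, not part of this header):
 * a counter is initialised statically with ATOMIC_INIT() or at run time
 * with atomic_set(), and read back with atomic_read():
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) == 1)
 *		...
 */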

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
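
/*
 * Roughly, each ldrex/strex sequence below behaves like this C sketch
 * (illustrative only; store_was_exclusive() is a hypothetical helper
 * standing in for the strex status check, i.e. the "teq %1, #0 / bne 1b"
 * retry):
 *
 *	do {
 *		old = v->counter;		ldrex: load and mark exclusive
 *		new = old + i;			e.g. for atomic_add()
 *	} while (!store_was_exclusive(&v->counter, new));
 */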

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
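
/*
 * The smp_mb() before and after the exclusive sequences make the
 * value-returning operations (atomic_add_return, atomic_sub_return,
 * atomic_cmpxchg) fully ordered; the void atomic_add()/atomic_sub()
 * above issue no barrier of their own.
 */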

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
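
/*
 * Pre-ARMv6 fallback: with no exclusive load/store instructions available,
 * atomicity is obtained by disabling interrupts around each
 * read-modify-write. This is only correct on uniprocessor systems, hence
 * the #error above when CONFIG_SMP is set.
 */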

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
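
/*
 * Illustrative refcount-style use of atomic_inc_not_zero() (an assumed
 * example, not part of this header): take a reference only while the
 * count has not already dropped to zero, e.g.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	object is already being torn down
 */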

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
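
/*
 * atomic_inc()/atomic_dec() expand to the void atomic_add()/atomic_sub()
 * above and therefore imply no memory barrier of their own; callers that
 * need ordering around them use the smp_mb__{before,after}_atomic_* macros,
 * which here are simply full smp_mb() barriers.
 */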

#include <asm-generic/atomic-long.h>
#endif
#endif