/* system.h: FR-V CPU control definitions
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/linkage.h>
#include <linux/kernel.h>

struct thread_struct;

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.
 * The `mb' is to tell GCC not to cache `current' across this call.
 */
extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev_thread,
				struct thread_struct *next_thread,
				struct task_struct *prev);

#define switch_to(prev, next, last)					\
do {									\
	(prev)->thread.sched_lr =					\
		(unsigned long) __builtin_return_address(0);		\
	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev));	\
	mb();								\
} while (0)
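
/*
 * Illustrative use (schematic only, not part of this header): the scheduler
 * core is expected to invoke the macro roughly as
 *
 *	switch_to(prev, next, last);
 *
 * When 'prev' is eventually scheduled back onto a CPU, execution resumes
 * just after this point and 'last' names the task that was running there
 * immediately beforehand.
 */
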
/*
 * Force strict CPU ordering.
 */
#define nop()			asm volatile ("nop"::)
#define mb()			asm volatile ("membar" : : :"memory")
#define rmb()			asm volatile ("membar" : : :"memory")
#define wmb()			asm volatile ("membar" : : :"memory")
#define read_barrier_depends()	do { } while (0)
#define smp_mb()			barrier()
#define smp_rmb()			barrier()
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	do {} while (0)
#define set_mb(var, value) \
	do { var = (value); barrier(); } while (0)
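
/*
 * Illustrative ordering pattern (schematic only, not part of this header),
 * assuming 'data' and 'ready' are shared with another observer and
 * 'consume' stands for whatever the reader does with the value:
 *
 *	writer:				reader:
 *		data = value;			while (!ready)
 *		wmb();					cpu_relax();
 *		ready = 1;			rmb();
 *						consume(data);
 *
 * wmb() keeps the store to 'data' visible before the store to 'ready';
 * rmb() keeps the load of 'ready' ordered before the load of 'data'.
 */
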
extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2)));
extern void free_initmem(void);

#define arch_align_stack(x) (x)

/*****************************************************************************/
/*
 * compare and conditionally exchange value with memory
 * - if (*ptr == test) then orig = *ptr; *ptr = new;
 * - if (*ptr != test) then orig = *ptr;
 */

/* 64-bit compare-and-exchange helper, added as part of the FRV atomic64_t
 * implementation.
 */
extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define cmpxchg(ptr, test, new)						\
({									\
	__typeof__(ptr) __xg_ptr = (ptr);				\
	__typeof__(*(ptr)) __xg_orig, __xg_tmp;				\
	__typeof__(*(ptr)) __xg_test = (test);				\
	__typeof__(*(ptr)) __xg_new = (new);				\
									\
	switch (sizeof(__xg_orig)) {					\
	case 4:								\
		asm volatile(						\
			"0:						\n"	\
			"	orcc		gr0,gr0,gr0,icc3	\n"	\
			"	ckeq		icc3,cc7		\n"	\
			"	ld.p		%M0,%1			\n"	\
			"	orcr		cc7,cc7,cc3		\n"	\
			"	sub%I4cc	%1,%4,%2,icc0		\n"	\
			"	bne		icc0,#0,1f		\n"	\
			"	cst.p		%3,%M0		,cc3,#1	\n"	\
			"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
			"	beq		icc3,#0,0b		\n"	\
			"1:						\n"	\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp)	\
			: "r"(__xg_new), "NPr"(__xg_test)		\
			: "memory", "cc7", "cc3", "icc3", "icc0"	\
			);						\
		break;							\
									\
	default:							\
		__xg_orig = (__typeof__(__xg_orig))0;			\
		asm volatile("break");					\
		break;							\
	}								\
									\
	__xg_orig;							\
})
#else

extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);

#define cmpxchg(ptr, test, new)						\
({									\
	__typeof__(ptr) __xg_ptr = (ptr);				\
	__typeof__(*(ptr)) __xg_orig;					\
	__typeof__(*(ptr)) __xg_test = (test);				\
	__typeof__(*(ptr)) __xg_new = (new);				\
									\
	switch (sizeof(__xg_orig)) {					\
	case 4:								\
		__xg_orig = (__force __typeof__(*ptr))			\
			__cmpxchg_32((__force uint32_t *)__xg_ptr,	\
				     (__force uint32_t)__xg_test,	\
				     (__force uint32_t)__xg_new);	\
		break;							\
									\
	default:							\
		__xg_orig = (__typeof__(__xg_orig))0;			\
		asm volatile("break");					\
		break;							\
	}								\
									\
	__xg_orig;							\
})

#endif
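
/*
 * Illustrative use of cmpxchg() (schematic only, not part of this header):
 * an atomic add on a 32-bit word built from a compare-and-exchange retry
 * loop; 'example_atomic_add' is a made-up name.
 *
 *	static inline void example_atomic_add(unsigned long *p, unsigned long n)
 *	{
 *		unsigned long old, cur = *p;
 *
 *		do {
 *			old = cur;
 *			cur = cmpxchg(p, old, old + n);
 *		} while (cur != old);
 *	}
 *
 * cmpxchg() returns the value found in *p before the attempted store, so
 * the loop retries whenever another writer slipped in between the read and
 * the exchange.
 */
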
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
		return cmpxchg((unsigned long *)ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
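
/*
 * Illustrative use of cmpxchg_local() (schematic only, not part of this
 * header): bumping a counter that is only ever modified from the local CPU,
 * so no ordering against other CPUs is needed; 'example_local_inc' is a
 * made-up name.
 *
 *	static inline void example_local_inc(unsigned long *ctr)
 *	{
 *		unsigned long old, cur = *ctr;
 *
 *		do {
 *			old = cur;
 *			cur = cmpxchg_local(ctr, old, old + 1);
 *		} while (cur != old);
 *	}
 */
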
#endif /* _ASM_SYSTEM_H */