/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/ptrace.h>
#include <asm/war.h>
#include <asm/interrupt.h>

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif
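
/*
 * Illustrative usage sketch, not part of the original header: publishing a
 * structure through a pointer with the SMP barrier variants defined above.
 * The names "msg", "msg_data", "msg_ptr" and do_something() are made up
 * for this example.
 *
 *	struct msg { int data; };
 *	static struct msg msg_data;
 *	static struct msg *msg_ptr;
 *
 *	void producer(void)
 *	{
 *		msg_data.data = 42;
 *		smp_wmb();			(order the data store before the publish)
 *		msg_ptr = &msg_data;
 *	}
 *
 *	void consumer(void)
 *	{
 *		struct msg *p = msg_ptr;
 *
 *		if (p) {
 *			smp_read_barrier_depends();	(pairs with smp_wmb() above)
 *			do_something(p->data);		(guaranteed to observe 42)
 *		}
 *	}
 *
 * do_something() stands for whatever the consumer does with the data; the
 * point is only that the dependent read of p->data cannot be reordered
 * before the read of msg_ptr.
 */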

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void * last, void * next, void * next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_fpu &&						\
	    (prev->thread.mflags & MF_FPUBOUND) &&			\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		prev->thread.mflags &= ~MF_FPUBOUND;			\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	next->thread.emulated_fp = 0;					\
	(last) = resume(prev, next, next->thread_info);			\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)

#else

#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)

#endif

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
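
/*
 * Illustrative usage sketch, not part of the original header: a crude
 * test-and-set spin loop built on tas()/xchg().  "lock_word" is made up
 * for this example; real code should use the spinlock API instead.
 *
 *	static volatile int lock_word;
 *
 *	while (tas(&lock_word))
 *		;			(spin until the old value read back was 0)
 *	...critical section...
 *	lock_word = 0;			(release; the spinlock API also takes
 *					 care of the required memory barriers)
 */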

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
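
/*
 * Illustrative usage sketch, not part of the original header: a lock-free
 * increment built on cmpxchg().  "counter" is made up for this example.
 *
 *	static unsigned long counter;
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * The loop retries whenever another CPU updated "counter" between the
 * plain read and the cmpxchg().
 */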

extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler(int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern NORET_TYPE void die(const char *, struct pt_regs *);

static inline void die_if_kernel(const char *str, struct pt_regs *regs)
{
	if (unlikely(!user_mode(regs)))
		die(str, regs);
}

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */