/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_fpu &&						\
	    (prev->thread.mflags & MF_FPUBOUND) &&			\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		prev->thread.mflags &= ~MF_FPUBOUND;			\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	next->thread.emulated_fp = 0;					\
	(last) = resume(prev, next, next->thread_info);			\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)

#else

#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)

#endif
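
/*
 * Illustrative note, not part of the original header: switch_to() is only
 * meant to be called from the scheduler's context-switch path, roughly as
 *
 *	struct task_struct *prev = current, *last;
 *	...
 *	switch_to(prev, next, last);
 *
 * When the outgoing task is eventually scheduled back in, "last" holds the
 * task that ran immediately before it on this CPU (the value returned by
 * resume() above).
 */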

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
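
/*
 * Illustrative sketch, not part of the original header: xchg() atomically
 * stores a new value and returns the value it replaced, so a caller can
 * claim a flag exactly once even when several CPUs race.  The variable and
 * function names below are hypothetical.
 *
 *	static int probe_done;
 *
 *	void maybe_probe(void)
 *	{
 *		if (xchg(&probe_done, 1) == 0)
 *			do_probe();
 *	}
 *
 * Only the first caller observes the old value 0 and runs do_probe().
 */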

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 3f					\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 3f					\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
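
/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * around cmpxchg() is a read/compute/compare-and-swap loop that retries
 * whenever another CPU updated the word in the meantime.  The helper name
 * below is hypothetical.
 *
 *	static inline unsigned long add_return_sketch(volatile unsigned long *p,
 *						      unsigned long inc)
 *	{
 *		unsigned long old, new;
 *
 *		do {
 *			old = *p;
 *			new = old + inc;
 *		} while (cmpxchg(p, old, new) != old);
 *
 *		return new;
 *	}
 *
 * cmpxchg() returns the value it found; the loop exits only when that value
 * still matches "old", i.e. when the store actually took effect.
 */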

extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler(int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */