/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down.  If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */
#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)

#define finish_arch_switch(prev)					\
do {									\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
} while (0)
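
/*
 * Usage sketch (illustrative only, not part of this header): the generic
 * context-switch path is expected to invoke these hooks roughly as below,
 * where "prev", "next" and "last" are hypothetical local variables of type
 * struct task_struct *:
 *
 *	switch_to(prev, next, last);
 *	finish_arch_switch(last);
 *
 * Once resume() returns on this CPU, "last" holds the task that was actually
 * switched away from, which is what the scheduler conventionally hands to
 * finish_arch_switch().
 */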

/*
 * Atomically exchange *m with val and return the old value.  An ll/sc loop
 * is used when the CPU supports it; otherwise interrupts are disabled around
 * a plain load/store pair.
 */
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
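
/*
 * Usage sketch (illustrative only, not part of this header): xchg() stores a
 * new value and returns the previous one atomically.  Only 4-byte objects
 * (and, on 64-bit kernels, 8-byte objects) are supported; any other size
 * fails at link time via __xchg_called_with_bad_pointer().  Assuming a
 * hypothetical variable "token":
 *
 *	static unsigned int token;
 *	unsigned int old = xchg(&token, 1);	 old receives the value token held before
 */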

extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);

typedef void (*vi_handler_t)(void);
extern void *set_vi_handler(int n, vi_handler_t addr);

extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

extern unsigned long arch_align_stack(unsigned long sp);

#endif /* _ASM_SYSTEM_H */