#ifndef __ASM_SH_SYSTEM_32_H
#define __ASM_SH_SYSTEM_32_H

#include <linux/types.h>

struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
/*
 *	switch_to() should switch this CPU from task "prev" to task "next".
 *	When "prev" is eventually scheduled back in, "last" is set to the
 *	task that was running just before it.
 */
#define switch_to(prev, next, last)				\
do {								\
	register u32 *__ts1 __asm__ ("r1") = (u32 *)&prev->thread.sp;	\
	register u32 *__ts2 __asm__ ("r2") = (u32 *)&prev->thread.pc;	\
	register u32 *__ts4 __asm__ ("r4") = (u32 *)prev;	\
	register u32 *__ts5 __asm__ ("r5") = (u32 *)next;	\
	register u32 *__ts6 __asm__ ("r6") = (u32 *)&next->thread.sp;	\
	register u32 __ts7 __asm__ ("r7") = next->thread.pc;	\
	struct task_struct *__last;				\
								\
	__asm__ __volatile__ (					\
		".balign 4\n\t"					\
		"stc.l	gbr, @-r15\n\t"				\
		"sts.l	pr, @-r15\n\t"				\
		"mov.l	r8, @-r15\n\t"				\
		"mov.l	r9, @-r15\n\t"				\
		"mov.l	r10, @-r15\n\t"				\
		"mov.l	r11, @-r15\n\t"				\
		"mov.l	r12, @-r15\n\t"				\
		"mov.l	r13, @-r15\n\t"				\
		"mov.l	r14, @-r15\n\t"				\
		"mov.l	r15, @r1\t! save SP\n\t"		\
		"mov.l	@r6, r15\t! change to new stack\n\t"	\
		"mova	1f, %0\n\t"				\
		"mov.l	%0, @r2\t! save PC\n\t"			\
		"mov.l	2f, %0\n\t"				\
		"jmp	@%0\t! call __switch_to\n\t"		\
		" lds	r7, pr\t! with return to new PC\n\t"	\
		".balign	4\n"				\
		"2:\n\t"					\
		".long	__switch_to\n"				\
		"1:\n\t"					\
		"mov.l	@r15+, r14\n\t"				\
		"mov.l	@r15+, r13\n\t"				\
		"mov.l	@r15+, r12\n\t"				\
		"mov.l	@r15+, r11\n\t"				\
		"mov.l	@r15+, r10\n\t"				\
		"mov.l	@r15+, r9\n\t"				\
		"mov.l	@r15+, r8\n\t"				\
		"lds.l	@r15+, pr\n\t"				\
		"ldc.l	@r15+, gbr\n\t"				\
		: "=z" (__last)					\
		: "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
		  "r" (__ts5), "r" (__ts6), "r" (__ts7)		\
		: "r3", "t");					\
								\
	last = __last;						\
} while (0)
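
/*
 * Usage note (a sketch of the expected caller, not defined here): the
 * scheduler's context_switch() is assumed to invoke this roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * so that when "prev" runs again, the third argument receives the task
 * that was running just before "prev" was resumed.
 */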
#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text")))
/*
 * Jump to uncached area.
 * When handling TLB or caches, we need to do it from an uncached area.
*/
#define jump_to_uncached()			\
do {						\
	unsigned long __dummy;			\
						\
	__asm__ __volatile__(			\
		"mova	1f, %0\n\t"		\
		"add	%1, %0\n\t"		\
		"jmp	@%0\n\t"		\
		" nop\n\t"			\
		".balign 4\n"			\
		"1:"				\
		: "=&z" (__dummy)		\
		: "r" (cached_to_uncached));	\
} while (0)
/*
 * Back to cached area.
*/
#define back_to_cached()				\
do {							\
	unsigned long __dummy;				\
	ctrl_barrier();					\
	__asm__ __volatile__(				\
		"mov.l	1f, %0\n\t"			\
		"jmp	@%0\n\t"			\
		" nop\n\t"				\
		".balign 4\n"				\
		"1:	.long 2f\n"			\
		"2:"					\
		: "=&r" (__dummy));			\
} while (0)
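
/*
 * A minimal usage sketch (the function below is illustrative, not part
 * of this header): code that manipulates the caches or TLB marks itself
 * with __uses_jump_to_uncached so it is placed in .uncached.text, then
 * brackets the critical sequence with the two macros above:
 *
 *	static void __uses_jump_to_uncached flush_example(void)
 *	{
 *		jump_to_uncached();
 *		... poke cache/TLB control registers here ...
 *		back_to_cached();
 *	}
 */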
int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
			    struct mem_access *ma);
#endif /* __ASM_SH_SYSTEM_32_H */