/*
 * linux/arch/unicore32/kernel/entry.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/unistd.h>
#include <generated/asm-offsets.h>
#include "debug-macro.S"

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif
@ Clear the frame pointer so backtraces terminate at this stack frame
@ (only meaningful when frame pointers are configured in).
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm
@ Reload the alignment-trap control bits from the cr_alignment variable
@ into coprocessor 0 control register c1.  \rtemp is clobbered.
	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldw	\rtemp, .LCcralign
	ldw	\rtemp, [\rtemp]
	movc	p0.c1, \rtemp, #0
#endif
	.endm
@ Load the user-mode sp and lr from [\rd + \offset] by briefly toggling
@ the processor into SUSR mode and back.  \rtemp is clobbered.
	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mov	\rtemp, asr
	xor	\rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
	mov.a	asr, \rtemp			@ switch to the SUSR mode

	ldw	sp, [\rd+], #\offset		@ load sp_user
	ldw	lr, [\rd+], #\offset + 4	@ load lr_user

	xor	\rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
	mov.a	asr, \rtemp			@ switch back to the PRIV mode
	.endm
@ Return from a PRIV-mode exception: restore the saved ASR via bsr,
@ then pop the entire register frame (pc load completes the return).
	.macro	priv_exit, rpsr
	mov.a	bsr, \rpsr
	ldm.w	(r0 - r15), [sp]+
	ldm.b	(r16 - pc), [sp]+		@ load r0 - pc, asr
	.endm
@ Restore the user-mode register frame from the kernel stack and return
@ to user space.  With \fast = 1, r0 is skipped because it already holds
@ the syscall return value.
	.macro	restore_user_regs, fast = 0, offset = 0
	ldw	r1, [sp+], #\offset + S_PSR	@ get calling asr
	ldw	lr, [sp+], #\offset + S_PC	@ get pc
	mov.a	bsr, r1				@ save in bsr_priv
	.if	\fast
	add	sp, sp, #\offset + S_R1		@ r0 is syscall return value
	ldm.w	(r1 - r15), [sp]+		@ get calling r1 - r15
	ldur	(r16 - lr), [sp]+		@ get calling r16 - lr
	.else
	ldm.w	(r0 - r15), [sp]+		@ get calling r0 - r15
	ldur	(r16 - lr), [sp]+		@ get calling r16 - lr
	.endif
	nop
	add	sp, sp, #S_FRAME_SIZE - S_R16
	mov.a	pc, lr				@ return
						@ and move bsr_priv into asr
	.endm
@ Derive the current thread_info pointer by rounding sp down to the
@ 8 KiB (1 << 13) thread-stack boundary.
	.macro	get_thread_info, rd
	mov	\rd, sp >> #13
	mov	\rd, \rd << #13
	.endm
@ Read the PKUnity interrupt controller: AND the pending (ICIP) and mask
@ (ICMR) registers, then convert the highest set bit to an irq number in
@ \irqnr.  On exit the Z flag (EQ) is set if no masked irq is pending.
	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
	ldw	\base, =(PKUNITY_INTC_BASE)
	ldw	\irqstat, [\base+], #0xC	@ INTC_ICIP
	ldw	\tmp,     [\base+], #0x4	@ INTC_ICMR
	and.a	\irqstat, \irqstat, \tmp
	beq	1001f
	cntlz	\irqnr, \irqstat
	rsub	\irqnr, \irqnr, #31
1001:	/* EQ will be set if no irqs pending */
	.endm
#ifdef CONFIG_DEBUG_LL
@ Low-level debug helper: print "<reg>:<pc>: epip4d" via the DEBUG_LL
@ channel.  r0-r3 and lr are saved to a scratch area (901f) and restored,
@ so only \temp is clobbered.
	.macro	printreg, reg, temp
		adr	\temp, 901f
		stm	(r0 - r3), [\temp]+
		stw	lr, [\temp+], #0x10
		mov	r0, \reg
		b.l	printhex8
		mov	r0, #':'
		b.l	printch
		mov	r0, pc
		b.l	printhex8
		adr	r0, 902f
		b.l	printascii
		adr	\temp, 901f
		ldm	(r0 - r3), [\temp]+
		ldw	lr, [\temp+], #0x10
		b	903f
901:	.word	0, 0, 0, 0, 0		@ r0-r3, lr
902:	.asciz	": epip4d\n"
	.align
903:
	.endm
#endif
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r21		@ syscall number
tbl	.req	r22		@ syscall table pointer
why	.req	r22		@ Linux syscall (!= 0)
tsk	.req	r23		@ current thread_info
/*
 * Interrupt handling.  Preserves r17, r18, r19.
 * Loops back to 1b until no masked irq remains pending.
 */
	.macro	intr_handler
1:	get_irqnr_and_base r0, r6, r5, lr
	beq	2f
	mov	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adr	lr, 1b			@ return here to poll for more irqs
	b	asm_do_IRQ
2:
	.endm
/*
 * PRIV mode handlers
 *
 * Build a struct pt_regs frame on the current (kernel) stack for an
 * exception taken in privileged mode.  On entry r0 points to the small
 * per-mode save area holding the original r0, lr_<exception> and
 * bsr_<exception>.
 */
	.macro	priv_entry
	sub	sp, sp, #(S_FRAME_SIZE - 4)
	stm	(r1 - r15), [sp]+
	add	r5, sp, #S_R15
	stm	(r16 - r28), [r5]+

	ldm	(r1 - r3), [r0]+
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #(S_FRAME_SIZE - 4)
	stw.w	r1, [sp+], #-4		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_priv
	@  r1 - lr_priv
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - bsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stm	(r0 - r4), [r5]+
	.endm
/*
 * User mode handlers
 *
 * Build a struct pt_regs frame on the kernel stack for an exception
 * taken in user mode; also save sp_user/lr_user and re-arm the
 * alignment trap for kernel execution.
 */
	.macro	user_entry
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r1 - r15), [sp+]
	add	r4, sp, #S_R16
	stm	(r16 - r28), [r4]+

	ldm	(r1 - r3), [r0]+
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""

	stw	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - bsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_user and lr_user
	@
	stm	(r2 - r4), [r0]+
	stur	(sp, lr), [r0-]

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm
.text
@
@ __invalid - generic code for failed exception
@ (re-entrant version of handlers)
@
__invalid :
sub s p , s p , #S _ F R A M E _ S I Z E
stm ( r1 - r15 ) , [ s p + ]
add r1 , s p , #S _ R 16
stm ( r16 - r28 , s p , l r ) , [ r1 ] +
zero_ f p
ldm ( r4 - r6 ) , [ r0 ] +
add r0 , s p , #S _ P C @ h e r e f o r i n t e r l o c k a v o i d a n c e
mov r7 , #- 1 @ "" "" "" ""
stw r4 , [ s p ] @ save preserved r0
stm ( r5 - r7 ) , [ r0 ] + @ lr_<exception>,
@ asr_<exception>, "old_r0"
mov r0 , s p
mov r1 , a s r
b b a d _ m o d e
ENDPROC( _ _ i n v a l i d )
.align 5
__dabt_priv :
priv_ e n t r y
@
@ get ready to re-enable interrupts if appropriate
@
mov r17 , a s r
cand. a r3 , #P S R _ I _ B I T
bne 1 f
andn r17 , r17 , #P S R _ I _ B I T
1 :
@
@ Call the processor-specific abort handler:
@
@ r2 - aborted context pc
@ r3 - aborted context asr
@
@ The abort handler must return the aborted address in r0, and
@ the fault status register in r1.
@
movc r1 , p0 . c3 , #0 @ get FSR
movc r0 , p0 . c4 , #0 @ get FAR
@
@ set desired INTR state, then call main handler
@
mov. a a s r , r17
mov r2 , s p
b. l d o _ D a t a A b o r t
@
@ INTRs off again before pulling preserved data off the stack
@
disable_ i r q r0
@
@ restore BSR and restart the instruction
@
ldw r2 , [ s p + ] , #S _ P S R
priv_ e x i t r2 @ return from exception
ENDPROC( _ _ d a b t _ p r i v )
.align 5
__intr_priv :
priv_ e n t r y
intr_ h a n d l e r
mov r0 , #0 @ epip4d
movc p0 . c5 , r0 , #14
nop; nop; nop; nop; nop; nop; nop; nop
ldw r4 , [ s p + ] , #S _ P S R @ i r q s a r e a l r e a d y d i s a b l e d
priv_ e x i t r4 @ return from exception
ENDPROC( _ _ i n t r _ p r i v )
.ltorg
.align 5
__extn_priv :
priv_ e n t r y
mov r0 , s p @ struct pt_regs *regs
mov r1 , a s r
b b a d _ m o d e @ not supported
ENDPROC( _ _ e x t n _ p r i v )
.align 5
__pabt_priv :
priv_ e n t r y
@
@ re-enable interrupts if appropriate
@
mov r17 , a s r
cand. a r3 , #P S R _ I _ B I T
bne 1 f
andn r17 , r17 , #P S R _ I _ B I T
1 :
@
@ set args, then call main handler
@
@ r0 - address of faulting instruction
@ r1 - pointer to registers on stack
@
mov r0 , r2 @ pass address of aborted instruction
mov r1 , #5
mov. a a s r , r17
mov r2 , s p @ regs
b. l d o _ P r e f e t c h A b o r t @ call abort handler
@
@ INTRs off again before pulling preserved data off the stack
@
disable_ i r q r0
@
@ restore BSR and restart the instruction
@
ldw r2 , [ s p + ] , #S _ P S R
priv_ e x i t r2 @ return from exception
ENDPROC( _ _ p a b t _ p r i v )
.align 5
.LCcralign :
.word cr_alignment
.align 5
__dabt_user :
user_ e n t r y
# ifdef C O N F I G _ U N I C O R E _ F P U _ F 6 4
cff i p , s31
cand. a i p , #0x08000000 @ FPU execption traps?
beq 2 0 9 f
ldw i p , [ s p + ] , #S _ P C
add i p , i p , #4
stw i p , [ s p + ] , #S _ P C
@
@ fall through to the emulation code, which returns using r19 if
@ it has emulated the instruction, or the more conventional lr
@ if we are to treat this as a real extended instruction
@
@ r0 - instruction
@
1 : ldw. u r0 , [ r2 ]
adr r19 , r e t _ f r o m _ e x c e p t i o n
adr l r , 2 0 9 f
@
@ fallthrough to call do_uc_f64
@
/ *
* Check w h e t h e r t h e i n s t r u c t i o n i s a c o - p r o c e s s o r i n s t r u c t i o n .
* If y e s , w e n e e d t o c a l l t h e r e l e v a n t c o - p r o c e s s o r h a n d l e r .
*
* Note t h a t w e d o n ' t d o a f u l l c h e c k h e r e f o r t h e c o - p r o c e s s o r
* instructions; all instructions with bit 27 set are well
* defined. T h e o n l y i n s t r u c t i o n s t h a t s h o u l d f a u l t a r e t h e
* co- p r o c e s s o r i n s t r u c t i o n s .
*
* Emulators m a y w i s h t o m a k e u s e o f t h e f o l l o w i n g r e g i s t e r s :
* r0 = i n s t r u c t i o n o p c o d e .
* r2 = P C
* r1 9 = n o r m a l " s u c c e s s f u l " r e t u r n a d d r e s s
* r2 0 = t h i s t h r e a d s t h r e a d _ i n f o s t r u c t u r e .
* lr = u n r e c o g n i s e d i n s t r u c t i o n r e t u r n a d d r e s s
* /
get_ t h r e a d _ i n f o r20 @ get current thread
and r8 , r0 , #0x00003c00 @ mask out CP number
mov r7 , #1
stb r7 , [ r20 + ] , #T I _ U S E D _ C P + 2 @ set appropriate used_cp[]
@ F64 hardware support entry point.
@ r0 = faulted instruction
@ r19 = return address
@ r20 = fp_state
enable_ i r q r4
add r20 , r20 , #T I _ F P S T A T E @ r 20 = w o r k s p a c e
cff r1 , s31 @ get fpu FPSCR
andn r2 , r1 , #0x08000000
ctf r2 , s31 @ clear 27 bit
mov r2 , s p @ nothing stacked - regdump is at TOS
mov l r , r19 @ setup for a return to the user code
@ Now call the C code to package up the bounce to the support code
@ r0 holds the trigger instruction
@ r1 holds the FPSCR value
@ r2 pointer to register dump
b u c f64 _ e x c h a n d l e r
209 :
# endif
@
@ Call the processor-specific abort handler:
@
@ r2 - aborted context pc
@ r3 - aborted context asr
@
@ The abort handler must return the aborted address in r0, and
@ the fault status register in r1.
@
movc r1 , p0 . c3 , #0 @ get FSR
movc r0 , p0 . c4 , #0 @ get FAR
@
@ INTRs on, then call the main handler
@
enable_ i r q r2
mov r2 , s p
adr l r , r e t _ f r o m _ e x c e p t i o n
b d o _ D a t a A b o r t
ENDPROC( _ _ d a b t _ u s e r )
.align 5
__intr_user :
user_ e n t r y
get_ t h r e a d _ i n f o t s k
intr_ h a n d l e r
mov w h y , #0
b r e t _ t o _ u s e r
ENDPROC( _ _ i n t r _ u s e r )
.ltorg
.align 5
__extn_user :
user_ e n t r y
mov r0 , s p
mov r1 , a s r
b b a d _ m o d e
ENDPROC( _ _ e x t n _ u s e r )
.align 5
__pabt_user :
user_ e n t r y
mov r0 , r2 @ pass address of aborted instruction.
mov r1 , #5
enable_ i r q r1 @ Enable interrupts
mov r2 , s p @ regs
b. l d o _ P r e f e t c h A b o r t @ call abort handler
/* fall through */
/ *
* This i s t h e r e t u r n c o d e t o u s e r m o d e f o r a b o r t h a n d l e r s
* /
ENTRY( r e t _ f r o m _ e x c e p t i o n )
get_ t h r e a d _ i n f o t s k
mov w h y , #0
b r e t _ t o _ u s e r
ENDPROC( _ _ p a b t _ u s e r )
ENDPROC( r e t _ f r o m _ e x c e p t i o n )
/*
 * Register switch for UniCore V2 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	stm.w	(r4 - r15), [ip]+
	stm.w	(r16 - r27, sp, lr), [ip]+

#ifdef CONFIG_UNICORE_FPU_F64
	@ save outgoing task's FPU state, then load the incoming task's
	add	ip, r1, #TI_FPSTATE
	sfm.w	(f0  - f7 ), [ip]+
	sfm.w	(f8  - f15), [ip]+
	sfm.w	(f16 - f23), [ip]+
	sfm.w	(f24 - f31), [ip]+
	cff	r4, s31
	stw	r4, [ip]

	add	ip, r2, #TI_FPSTATE
	lfm.w	(f0  - f7 ), [ip]+
	lfm.w	(f8  - f15), [ip]+
	lfm.w	(f16 - f23), [ip]+
	lfm.w	(f24 - f31), [ip]+
	ldw	r4, [ip]
	ctf	r4, s31
#endif
	add	ip, r2, #TI_CPU_SAVE
	ldm.w	(r4 - r15), [ip]+
	ldm	(r16 - r27, sp, pc), [ip]+	@ Load all regs saved previously
ENDPROC(__switch_to)
.align 5
/ *
* This i s t h e f a s t s y s c a l l r e t u r n p a t h . W e d o a s l i t t l e a s
* possible h e r e , a n d t h i s i n c l u d e s s a v i n g r0 b a c k i n t o t h e P R I V
* stack.
* /
ret_fast_syscall :
disable_ i r q r1 @ disable interrupts
ldw r1 , [ t s k + ] , #T I _ F L A G S
cand. a r1 , #_ T I F _ W O R K _ M A S K
bne f a s t _ w o r k _ p e n d i n g
@ fast_restore_user_regs
restore_ u s e r _ r e g s f a s t = 1 , o f f s e t = S _ O F F
/ *
* Ok, w e n e e d t o d o e x t r a p r o c e s s i n g , e n t e r t h e s l o w p a t h .
* /
fast_work_pending :
stw. w r0 , [ s p + ] , #S _ R 0 + S _ O F F @ returned r0
work_pending :
cand. a r1 , #_ T I F _ N E E D _ R E S C H E D
bne w o r k _ r e s c h e d
mov r0 , s p @ 'regs'
mov r2 , w h y @ 'syscall'
cand. a r1 , #_ T I F _ S I G P E N D I N G @ d e l i v e r i n g a s i g n a l ?
cmovne w h y , #0 @ prevent further restarts
b. l d o _ n o t i f y _ r e s u m e
b r e t _ s l o w _ s y s c a l l @ Check work again
work_resched :
b. l s c h e d u l e
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq r1				@ disable interrupts
	get_thread_info tsk			@ epip4d, one path error?!
	ldw	r1, [tsk+], #TI_FLAGS
	cand.a	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	@ slow_restore_user_regs
	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	b.l	schedule_tail
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
/*
 * Entry for a newly created kernel thread: run the thread function in
 * r4 with argument r5, returning into the slow syscall-return path.
 */
ENTRY(ret_from_kernel_thread)
	b.l	schedule_tail
	mov	r0, r5
	adr	lr, ret_slow_syscall
	mov	pc, r4
ENDPROC(ret_from_kernel_thread)
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r0 - r15), [sp]+		@ Calling r0 - r15
	add	r8, sp, #S_R16
	stm	(r16 - r28), [r8]+		@ Calling r16 - r28
	add	r8, sp, #S_PC
	stur	(sp, lr), [r8-]			@ Calling sp, lr
	mov	r8, bsr				@ called from non-REAL mode
	stw	lr, [sp+], #S_PC		@ Save calling PC
	stw	r8, [sp+], #S_PSR		@ Save ASR
	stw	r0, [sp+], #S_OLD_R0		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */
	sub	ip, lr, #4
	ldw.u	scno, [ip]			@ get SWI instruction

#ifdef CONFIG_ALIGNMENT_TRAP
	ldw	ip, __cr_alignment
	ldw	ip, [ip]
	movc	p0.c1, ip, #0			@ update control register
#endif
	enable_irq ip

	get_thread_info tsk
	ldw	tbl, =sys_call_table		@ load syscall table pointer

	andn	scno, scno, #0xff000000		@ mask off SWI op-code
	andn	scno, scno, #0x00ff0000		@ mask off SWI op-code

	stm.w	(r4, r5), [sp-]			@ push fifth and sixth args
	ldw	ip, [tsk+], #TI_FLAGS		@ check for syscall tracing
	cand.a	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	csub.a	scno, #__NR_syscalls		@ check upper syscall limit
	adr	lr, ret_fast_syscall		@ return address
	bea	1f
	ldw	pc, [tbl+], scno << #2		@ call sys_* routine
1:
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func
/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	b.l	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	csub.a	scno, #__NR_syscalls		@ check upper syscall limit
	bea	2b
	ldm	(r0 - r3), [r1]+		@ have to reload r0 - r3
	ldw	pc, [tbl+], scno << #2		@ call sys_* routine

__sys_trace_return:
	stw.w	r0, [sp+], #S_R0 + S_OFF	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	b.l	syscall_trace
	b	ret_slow_syscall
.align 5
# ifdef C O N F I G _ A L I G N M E N T _ T R A P
.type _ _ cr_ a l i g n m e n t , #o b j e c t
__cr_alignment :
.word cr_alignment
# endif
.ltorg
ENTRY(sys_rt_sigreturn)
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	__sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn)
_ _ INIT
/ *
* Vector s t u b s .
*
* This c o d e i s c o p i e d t o 0 x f f f f02 0 0 s o w e c a n u s e b r a n c h e s i n t h e
* vectors, r a t h e r t h a n l d r ' s . N o t e t h a t t h i s c o d e m u s t n o t
* exceed 0 x30 0 b y t e s .
*
* Common s t u b e n t r y m a c r o :
* Enter i n I N T R m o d e , b s r = P R I V / U S E R A S R , l r = P R I V / U S E R P C
*
* SP p o i n t s t o a m i n i m a l a m o u n t o f p r o c e s s o r - p r i v a t e m e m o r y , t h e a d d r e s s
* of w h i c h i s c o p i e d i n t o r0 f o r t h e m o d e s p e c i f i c a b o r t h a n d l e r .
* /
.macro vector_ s t u b , n a m e , m o d e
.align 5
vector_ \ n a m e :
@
@ Save r0, lr_<exception> (parent PC) and bsr_<exception>
@ (parent ASR)
@
stw r0 , [ s p ]
stw l r , [ s p + ] , #4 @ save r0, lr
mov l r , b s r
stw l r , [ s p + ] , #8 @ save bsr
@
@ Prepare for PRIV mode. INTRs remain disabled.
@
mov r0 , a s r
xor r0 , r0 , #( \ m o d e ^ P R I V _ M O D E )
mov. a b s r , r0
@
@ the branch table must immediately follow this code
@
and l r , l r , #0x03
add l r , l r , #1
mov r0 , s p
ldw l r , [ p c + ] , l r < < #2
mov. a p c , l r @ branch to handler in PRIV mode
ENDPROC( v e c t o r _ \ n a m e )
.align 2
@ handler addresses follow this label
.endm
.globl __stubs_start
__stubs_start :
/ *
* Interrupt d i s p a t c h e r
* /
vector_ s t u b i n t r , I N T R _ M O D E
.long __intr_user @ 0 (USER)
.long __invalid @ 1
.long __invalid @ 2
.long __intr_priv @ 3 (PRIV)
/ *
* Data a b o r t d i s p a t c h e r
* Enter i n A B T m o d e , b s r = U S E R A S R , l r = U S E R P C
* /
vector_ s t u b d a b t , A B R T _ M O D E
.long __dabt_user @ 0 (USER)
.long __invalid @ 1
.long __invalid @ 2 (INTR)
.long __dabt_priv @ 3 (PRIV)
/ *
* Prefetch a b o r t d i s p a t c h e r
* Enter i n A B T m o d e , b s r = U S E R A S R , l r = U S E R P C
* /
vector_ s t u b p a b t , A B R T _ M O D E
.long __pabt_user @ 0 (USER)
.long __invalid @ 1
.long __invalid @ 2 (INTR)
.long __pabt_priv @ 3 (PRIV)
/ *
* Undef i n s t r e n t r y d i s p a t c h e r
* Enter i n E X T N m o d e , b s r = P R I V / U S E R A S R , l r = P R I V / U S E R P C
* /
vector_ s t u b e x t n , E X T N _ M O D E
.long __extn_user @ 0 (USER)
.long __invalid @ 1
.long __invalid @ 2 (INTR)
.long __extn_priv @ 3 (PRIV)
/ *
* We g r o u p a l l t h e f o l l o w i n g d a t a t o g e t h e r t o o p t i m i s e
* for C P U s w i t h s e p a r a t e I & D c a c h e s .
* /
.align 5
.LCvswi :
.word vector_swi
.globl __stubs_end
__stubs_end :
.equ stubs_ o f f s e t , _ _ v e c t o r s _ s t a r t + 0 x20 0 - _ _ s t u b s _ s t a r t
.globl __vectors_start
__vectors_start :
jepriv S Y S _ E R R O R 0
b v e c t o r _ e x t n + s t u b s _ o f f s e t
ldw p c , . L C v s w i + s t u b s _ o f f s e t
b v e c t o r _ p a b t + s t u b s _ o f f s e t
b v e c t o r _ d a b t + s t u b s _ o f f s e t
jepriv S Y S _ E R R O R 0
b v e c t o r _ i n t r + s t u b s _ o f f s e t
jepriv S Y S _ E R R O R 0
.globl __vectors_end
__vectors_end :
.data
.globl cr_alignment
.globl cr_no_alignment
cr_alignment :
.space 4
cr_no_alignment :
.space 4