#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH		0
#define BAD_DATA		1
#define BAD_ADDREXCPTN		2
#define BAD_IRQ			3
#define BAD_UNDEFINSTR		4
@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8
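
/*
 * Illustration only (not an authoritative layout): the offsets below come
 * from the generated asm-offsets.h, which should be treated as the real
 * reference. With the extra 8 bytes pushed for syscall args 5 and 6, the
 * stack seen by the syscall glue is assumed to look roughly like:
 *
 *	sp + 0			fifth syscall argument (from r4)
 *	sp + 4			sixth syscall argument (from r5)
 *	sp + S_OFF + S_R0	bottom of struct pt_regs (saved r0)
 *	...
 *	sp + S_OFF + S_PC	saved pc
 *	sp + S_OFF + S_PSR	saved cpsr
 *	sp + S_OFF + S_OLD_R0	original r0, kept for syscall restart
 */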

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...)	x
#else
#define ATRAP(x...)
#endif
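
/*
 * alignment_trap below compares the live SCTLR (CP15 c1, c0, 0) with the
 * value cached at the address stored at \label (at call sites this is
 * typically a literal pointing at the kernel's saved control register
 * copy) and rewrites SCTLR only when the two differ, so the relatively
 * expensive mcr is skipped on the common path.
 */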
	.macro	alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
	mrc	p15, 0, \rtmp2, c1, c0, 0
	ldr	\rtmp1, \label
	ldr	\rtmp1, [\rtmp1]
	teq	\rtmp1, \rtmp2
	mcrne	p15, 0, \rtmp1, c1, c0, 0
#endif
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If the exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is aligned to 64 bit automatically
 * (CCR.STKALIGN set).
 *
 * Linux assumes that the interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and re-enabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
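
/*
 * For reference, the basic exception frame the v7-M core pushes is, from
 * the lowest address upwards:
 *
 *	r0, r1, r2, r3, r12, lr (r14), return address, xPSR
 *
 * plus one optional padding word when the core had to realign the stack,
 * as flagged by bit 9 of the saved xPSR.
 */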
	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the cpu was in when the
	@ exception happened, that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp
	moveq	r12, sp

	@ we cannot rely on r0-r3 and r12 matching the value saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	sub	sp, #S_FRAME_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto saved
	@ xPSR.
	@ The cpu might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this case
	@ another 32-bit value is included in the stack.
	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm
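
@ At this point sp should point to the bottom of a struct pt_regs frame
@ holding the full register state, so the common entry code can treat a
@ v7-M exception like the ARM/Thumb exception paths do.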

/*
 * PENDSV and SVCALL are configured to have the same exception
 * priorities. As a kernel thread runs at SVCALL execution priority it
 * can never be preempted and so we will never have to return to a
 * kernel thread here.
 */
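
/*
 * EXC_RET_THREADMODE_PROCESSSTACK (see asm/v7m.h) is the EXC_RETURN
 * value requesting a return to Thread mode on the process stack; loading
 * it into lr before the final bx lr below is what turns that branch into
 * an exception return to the thread.
 */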
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ an exception frame is always 8-byte aligned. To tell the hardware if
	@ the sp to be restored is aligned or not set bit 9 of the saved xPSR
	@ accordingly.
	tst	r2, #4
	subne	r2, r2, #4
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
	@ unpredictable
	bic	r4, #1

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}
	ldmia	sp, {r1, r3-r5}
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}
	.else
	stmdb	r2!, {r1, r3-r5}
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r4-r11
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #S_FRAME_SIZE-S_IP

	cpsie	i
	bx	lr
	.endm
#endif	/* CONFIG_CPU_V7M */
@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode
@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode
	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode
	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
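
@ Hypothetical usage sketch (not copied from a real call site): with the
@ base of a pt_regs frame in r0 and r1 free as scratch,
@
@	store_user_sp_lr r0, r1, S_SP
@
@ would save sp_usr at S_SP and lr_usr at the following word (S_LR).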

	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode SVC restore
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	sub	r0, sp, #4			@ uninhabited address
	strex	r1, r2, [r0]			@ clear the exclusive monitor
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	@ Thumb mode SVC restore
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
#endif
	.endm

@
@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
@
@ This macro acts in a similar manner to svc_exit but switches to FIQ
@ mode to restore the final part of the register state.
@
@ We cannot use the normal svc_exit procedure because that would
@ clobber spsr_svc (FIQ could be delivered during the first few
@ instructions of vector_swi meaning its contents have not been
@ saved anywhere).
@
@ Note that, unlike svc_exit, this macro also does not allow a caller
@ supplied rpsr. This is because the FIQ exceptions are not re-entrant
@ and the handlers cannot call into the scheduler (meaning the value
@ on the stack remains correct).
@
	.macro	svc_exit_via_fiq
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r0, sp
	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	msr	cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	add	r8, r0, #S_PC
	ldr	r9, [r0, #S_PSR]
	msr	spsr_cxsf, r9
	ldr	r0, [r0, #S_R0]
	ldmia	r8, {pc}^
#else
	@ Thumb mode restore
	add	r0, sp, #S_R2
	ldr	lr, [sp, #S_LR]
	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	ldmia	r0, {r2 - r12}
	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	msr	cpsr_c, r1
	sub	r0, #S_R2
	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8
#endif
	.endm

	.macro	restore_user_regs, fast = 0, offset = 0
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r2, sp
	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [r2]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #\offset + S_FRAME_SIZE
	movs	pc, lr				@ return & move spsr_svc into cpsr
#elif defined(CONFIG_CPU_V7M)
	@ V7M restore.
	@ Note that we don't need to do clrex here as clearing the local
	@ monitor is part of the exception entry and exit sequence.
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
#else
	@ Thumb mode restore
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	msr	spsr_cxsf, r1			@ save in spsr_svc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [sp]			@ clear the exclusive monitor

	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
#endif	/* !CONFIG_THUMB2_KERNEL */
	.endm
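
@ Illustrative only: the syscall return paths in entry-common.S are
@ expected to use this roughly as "restore_user_regs fast = 1, offset =
@ S_OFF" on the fast path (args 5 and 6 still on the stack, r0 already
@ holding the return value) and "restore_user_regs fast = 0, offset = 0"
@ on the slow path; see that file for the exact invocations.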

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
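
/*
 * The \save variants below preserve r0-r3, ip and lr around the call
 * because the AAPCS does not require a callee to keep them; saving them
 * leaves the syscall arguments and the return path intact across the
 * context tracking call.
 */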
	.macro	ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_exit
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_exit
	.endif
#endif
	.endm

	.macro	ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_enter
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_enter
	.endif
#endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info
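
@ Illustrative only (the real dispatch lives in entry-common.S): with scno
@ and tbl set up as above, invoking the handler is essentially
@
@	ldr	pc, [tbl, scno, lsl #2]		@ jump to the sys_* routine
@
@ i.e. an indexed load of the handler address straight into pc.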