/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	/* Platforms with no arch-specific user-return work: empty stub. */
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"
.align 5
/ *
* This i s t h e f a s t s y s c a l l r e t u r n p a t h . W e d o a s l i t t l e a s
* possible h e r e , a n d t h i s i n c l u d e s s a v i n g r0 b a c k i n t o t h e S V C
* stack.
* /
ret_fast_syscall :
2009-02-16 13:42:09 +03:00
UNWIND( . f n s t a r t )
UNWIND( . c a n t u n w i n d )
2005-04-26 18:18:26 +04:00
disable_ i r q @ disable interrupts
2005-04-17 02:20:36 +04:00
ldr r1 , [ t s k , #T I _ F L A G S ]
tst r1 , #_ T I F _ W O R K _ M A S K
bne f a s t _ w o r k _ p e n d i n g
2010-12-23 03:52:44 +03:00
# if d e f i n e d ( C O N F I G _ I R Q S O F F _ T R A C E R )
asm_ t r a c e _ h a r d i r q s _ o n
# endif
2005-04-26 18:20:34 +04:00
2007-02-17 00:16:32 +03:00
/* perform architecture specific actions before user return */
arch_ r e t _ t o _ u s e r r1 , l r
2009-07-24 15:32:54 +04:00
restore_ u s e r _ r e g s f a s t = 1 , o f f s e t = S _ O F F
2009-02-16 13:42:09 +03:00
UNWIND( . f n e n d )
2005-04-17 02:20:36 +04:00
/ *
* Ok, w e n e e d t o d o e x t r a p r o c e s s i n g , e n t e r t h e s l o w p a t h .
* /
fast_work_pending :
str r0 , [ s p , #S _ R 0 + S _ O F F ] ! @ returned r0
work_pending :
mov r0 , s p @ 'regs'
mov r2 , w h y @ 'syscall'
2012-07-19 20:47:55 +04:00
bl d o _ w o r k _ p e n d i n g
2012-07-19 20:48:50 +04:00
cmp r0 , #0
2012-07-19 20:48:21 +04:00
beq n o _ w o r k _ p e n d i n g
2012-07-19 20:48:50 +04:00
movlt s c n o , #( _ _ N R _ r e s t a r t _ s y s c a l l - _ _ N R _ S Y S C A L L _ B A S E )
2012-07-19 20:48:21 +04:00
ldmia s p , { r0 - r6 } @ have to reload r0 - r6
b l o c a l _ r e s t a r t @ ... and off we go
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	mov	why, #1				@ treat as a real syscall return
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
/*
 * Entry for a newly-scheduled kernel thread: r4 = argument, r5 = thread fn.
 */
ENTRY(ret_from_kernel_thread)
 UNWIND(.fnstart)
 UNWIND(.cantunwind)
	bl	schedule_tail
	mov	r0, r4
	adr	lr, BSYM(1f)	@ kernel threads should not exit
	mov	pc, r5
1:	bl	do_exit
	nop
 UNWIND(.fnend)
ENDPROC(ret_from_kernel_thread)
/*
 * turn a kernel thread into userland process
 * use: ret_from_kernel_execve(struct pt_regs *normal)
 */
ENTRY(ret_from_kernel_execve)
	mov	why, #0			@ not a syscall
	str	why, [r0, #S_R0]	@ ... and we want 0 in ->ARM_r0 as well
	get_thread_info tsk		@ thread structure
	mov	sp, r0			@ stack pointer just under pt_regs
	b	ret_slow_syscall
ENDPROC(ret_from_kernel_execve)
/*
 * Count the syscalls by including calls.S with CALL() as a counter,
 * then redefine CALL() to emit table entries for the real inclusion below.
 */
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

/* Turn a raw lr value into the instrumented function's real address. */
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm
2010-10-07 16:09:47 +04:00
.macro __mcount suffix
mcount_ e n t e r
ldr r0 , =ftrace_trace_function
ldr r2 , [ r0 ]
adr r0 , . L f t r a c e _ s t u b
cmp r0 , r2
bne 1 f
2010-08-10 22:43:28 +04:00
2010-10-09 20:54:38 +04:00
# ifdef C O N F I G _ F U N C T I O N _ G R A P H _ T R A C E R
ldr r1 , =ftrace_graph_return
ldr r2 , [ r1 ]
cmp r0 , r2
bne f t r a c e _ g r a p h _ c a l l e r \ s u f f i x
ldr r1 , =ftrace_graph_entry
ldr r2 , [ r1 ]
ldr r0 , =ftrace_graph_entry_stub
cmp r0 , r2
bne f t r a c e _ g r a p h _ c a l l e r \ s u f f i x
# endif
2010-10-07 16:09:47 +04:00
mcount_ e x i t
2010-08-10 22:43:28 +04:00
2010-10-07 16:09:47 +04:00
1 : mcount_ g e t _ l r r1 @ lr of instrumented func
2012-01-24 19:52:52 +04:00
mcount_ a d j u s t _ a d d r r0 , l r @ instrumented function
2010-10-07 16:09:47 +04:00
adr l r , B S Y M ( 2 f )
mov p c , r2
2 : mcount_ e x i t
.endm
/*
 * Common body for the dynamic-ftrace entry points.  ftrace_call\suffix and
 * ftrace_graph_call\suffix are patch sites rewritten at runtime by
 * arch/arm/kernel/ftrace.c.
 */
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub			@ patched to the real tracer

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0				@ patched to ftrace_graph_caller
#endif

	mcount_exit
.endm
/* Hook the function-graph tracer: hand (&parent lr, func, fp) to the core. */
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount — old-style (pre-GCC-4.4) entry; lr saved at [fp, #-4] by the
 * APCS frame the compiler built in the caller.
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ dynamic ftrace: default body is a no-op return
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
/*
 * __gnu_mcount_nc — new-style (GCC 4.4+) entry; caller did "push {lr}",
 * so the original lr sits on the stack at [sp, #20] after mcount_enter.
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	@ pop our saves plus the caller's pushed lr (into ip), then return
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ dynamic ftrace: default body just undoes the caller's "push {lr}"
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Trampoline the graph tracer substitutes for the instrumented function's
 * return address: recover the real return address, then return there. */
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif
2008-05-31 12:53:50 +04:00
2010-08-10 22:33:52 +04:00
ENTRY( f t r a c e _ s t u b )
2010-08-10 22:37:21 +04:00
.Lftrace_stub :
2010-08-03 20:08:09 +04:00
mov p c , l r
2010-08-10 22:33:52 +04:00
ENDPROC( f t r a c e _ s t u b )
2008-05-31 12:53:50 +04:00
2008-10-07 03:06:12 +04:00
# endif / * C O N F I G _ F U N C T I O N _ T R A C E R * /
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)
/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b				@ out of range: arm_syscall path

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r1, scno
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall
.align 5
# ifdef C O N F I G _ A L I G N M E N T _ T R A P
.type _ _ cr_ a l i g n m e n t , #o b j e c t
__cr_alignment :
.word cr_alignment
2006-01-14 19:36:12 +03:00
# endif
.ltorg
/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls	@ check range
	stmloia	sp, {r5, r6}		@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)
/* fork/vfork/clone need a pointer to the saved registers (pt_regs). */
sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]		@ pt_regs pointer as the 5th argument
	b	sys_clone
ENDPROC(sys_clone_wrapper)
sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	@ sigreturn cannot legitimately return a restart-worthy error; a stale
	@ -ERESTART* value in r0 must not trigger syscall restart handling.
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	@ as with sys_sigreturn: never treat the returned r0 as a restart code
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]	@ user sp as the 3rd argument
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)
/* User space passes sizeof(struct statfs64) == 88; the kernel expects 84. */
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
2005-04-17 02:20:36 +04:00
/ *
* Note : off_ 4 k ( r5 ) i s a l w a y s u n i t s o f 4 K . I f w e c a n ' t d o t h e r e q u e s t e d
* offset, w e r e t u r n E I N V A L .
* /
sys_mmap2 :
# if P A G E _ S H I F T > 1 2
tst r5 , #P G O F F _ M A S K
moveq r5 , r5 , l s r #P A G E _ S H I F T - 12
streq r5 , [ s p , #4 ]
2009-12-01 01:37:04 +03:00
beq s y s _ m m a p _ p g o f f
2005-04-17 02:20:36 +04:00
mov r0 , #- E I N V A L
2006-06-25 14:17:23 +04:00
mov p c , l r
2005-04-17 02:20:36 +04:00
# else
str r5 , [ s p , #4 ]
2009-12-01 01:37:04 +03:00
b s y s _ m m a p _ p g o f f
2005-04-17 02:20:36 +04:00
# endif
2008-08-28 14:22:32 +04:00
ENDPROC( s y s _ m m a p2 )
#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
sys_oabi_pread64:
	stmia	sp, {r3, r4}		@ 64-bit offset in r3/r4 -> stack slots
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2			@ 64-bit length moves up to r2/r3
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]		@ count goes to the stack
	mov	r3, r2			@ 64-bit offset moves up to r2/r3
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)
/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif