/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"
.align 5
/ *
* This i s t h e f a s t s y s c a l l r e t u r n p a t h . W e d o a s l i t t l e a s
* possible h e r e , a n d t h i s i n c l u d e s s a v i n g r0 b a c k i n t o t h e S V C
* stack.
* /
ret_fast_syscall :
2009-02-16 13:42:09 +03:00
UNWIND( . f n s t a r t )
UNWIND( . c a n t u n w i n d )
2005-04-26 18:18:26 +04:00
disable_ i r q @ disable interrupts
2005-04-17 02:20:36 +04:00
ldr r1 , [ t s k , #T I _ F L A G S ]
tst r1 , #_ T I F _ W O R K _ M A S K
bne f a s t _ w o r k _ p e n d i n g
2005-04-26 18:20:34 +04:00
2007-02-17 00:16:32 +03:00
/* perform architecture specific actions before user return */
arch_ r e t _ t o _ u s e r r1 , l r
2009-07-24 15:32:54 +04:00
restore_ u s e r _ r e g s f a s t = 1 , o f f s e t = S _ O F F
2009-02-16 13:42:09 +03:00
UNWIND( . f n e n d )
2005-04-17 02:20:36 +04:00
/ *
* Ok, w e n e e d t o d o e x t r a p r o c e s s i n g , e n t e r t h e s l o w p a t h .
* /
fast_work_pending :
str r0 , [ s p , #S _ R 0 + S _ O F F ] ! @ returned r0
work_pending :
tst r1 , #_ T I F _ N E E D _ R E S C H E D
bne w o r k _ r e s c h e d
2009-09-02 12:14:16 +04:00
tst r1 , #_ T I F _ S I G P E N D I N G | _ T I F _ N O T I F Y _ R E S U M E
2005-04-17 02:20:36 +04:00
beq n o _ w o r k _ p e n d i n g
mov r0 , s p @ 'regs'
mov r2 , w h y @ 'syscall'
bl d o _ n o t i f y _ r e s u m e
2005-11-19 13:01:07 +03:00
b r e t _ s l o w _ s y s c a l l @ Check work again
2005-04-17 02:20:36 +04:00
work_resched :
bl s c h e d u l e
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
.equ NR_ s y s c a l l s ,0
# define C A L L ( x ) . e q u N R _ s y s c a l l s ,N R _ s y s c a l l s + 1
2005-04-17 02:20:36 +04:00
# include " c a l l s . S "
2006-01-19 15:57:01 +03:00
# undef C A L L
# define C A L L ( x ) . l o n g x
2005-04-17 02:20:36 +04:00
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	stmdb sp!, {r0-r3, lr}
	mov r0, lr
	sub r0, r0, #MCOUNT_INSN_SIZE

	.globl mcount_call
mcount_call:
	bl ftrace_stub
	ldr lr, [fp, #-4]			@ restore lr
	ldmia sp!, {r0-r3, pc}

ENTRY(ftrace_caller)
	stmdb sp!, {r0-r3, lr}
	ldr r1, [fp, #-4]
	mov r0, lr
	sub r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call
ftrace_call:
	bl ftrace_stub
	ldr lr, [fp, #-4]			@ restore lr
	ldmia sp!, {r0-r3, pc}

#else

ENTRY(__gnu_mcount_nc)
	stmdb sp!, {r0-r3, lr}
	ldr r0, =ftrace_trace_function
	ldr r2, [r0]
	adr r0, ftrace_stub
	cmp r0, r2
	bne gnu_trace
	ldmia sp!, {r0-r3, ip, lr}
	mov pc, ip

gnu_trace:
	ldr r1, [sp, #20]			@ lr of instrumented routine
	mov r0, lr
	sub r0, r0, #MCOUNT_INSN_SIZE
	mov lr, pc
	mov pc, r2
	ldmia sp!, {r0-r3, ip, lr}
	mov pc, ip

ENTRY(mcount)
	stmdb sp!, {r0-r3, lr}
	ldr r0, =ftrace_trace_function
	ldr r2, [r0]
	adr r0, ftrace_stub
	cmp r0, r2
	bne trace
	ldr lr, [fp, #-4]			@ restore lr
	ldmia sp!, {r0-r3, pc}

trace:
	ldr r1, [fp, #-4]			@ lr of instrumented routine
	mov r0, lr
	sub r0, r0, #MCOUNT_INSN_SIZE
	mov lr, pc
	mov pc, r2
	ldr lr, [fp, #-4]			@ restore lr
	ldmia sp!, {r0-r3, pc}

#endif /* CONFIG_DYNAMIC_FTRACE */

	.globl ftrace_stub
ftrace_stub:
	mov pc, lr

#endif /* CONFIG_FUNCTION_TRACER */
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif
.align 5
ENTRY( v e c t o r _ s w i )
2005-04-26 18:20:34 +04:00
sub s p , s p , #S _ F R A M E _ S I Z E
stmia s p , { r0 - r12 } @ Calling r0 - r12
2009-07-24 15:32:54 +04:00
ARM( a d d r8 , s p , #S _ P C )
ARM( s t m d b r8 , { s p , l r } ^ ) @ Calling sp, lr
THUMB( m o v r8 , s p )
THUMB( s t o r e _ u s e r _ s p _ l r r8 , r10 , S _ S P ) @ calling sp, lr
2005-04-26 18:20:34 +04:00
mrs r8 , s p s r @ called from non-FIQ mode, so ok.
str l r , [ s p , #S _ P C ] @ S a v e c a l l i n g P C
str r8 , [ s p , #S _ P S R ] @ S a v e C P S R
str r0 , [ s p , #S _ O L D _ R 0 ] @ Save OLD_R0
2005-04-17 02:20:36 +04:00
zero_ f p
2005-04-26 18:19:24 +04:00
/ *
* Get t h e s y s t e m c a l l n u m b e r .
* /
2006-01-14 19:31:29 +03:00
2006-01-14 19:36:12 +03:00
# if d e f i n e d ( C O N F I G _ O A B I _ C O M P A T )
2006-01-14 19:31:29 +03:00
2006-01-14 19:36:12 +03:00
/ *
* If w e h a v e C O N F I G _ O A B I _ C O M P A T t h e n w e n e e d t o l o o k a t t h e s w i
* value t o d e t e r m i n e i f i t i s a n E A B I o r a n o l d A B I c a l l .
* /
# ifdef C O N F I G _ A R M _ T H U M B
tst r8 , #P S R _ T _ B I T
movne r10 , #0 @ no thumb OABI emulation
ldreq r10 , [ l r , #- 4 ] @ get SWI instruction
# else
ldr r10 , [ l r , #- 4 ] @ get SWI instruction
A7 1 0 ( a n d i p , r10 , #0x0f000000 @ check for SWI )
A7 1 0 ( t e q i p , #0x0f000000 )
A7 1 0 ( b n e . L a r m 7 1 0 b u g )
# endif
2009-05-30 17:00:18 +04:00
# ifdef C O N F I G _ C P U _ E N D I A N _ B E 8
rev r10 , r10 @ little endian instruction
# endif
2006-01-14 19:36:12 +03:00
# elif d e f i n e d ( C O N F I G _ A E A B I )
/ *
* Pure E A B I u s e r s p a c e a l w a y s p u t s y s c a l l n u m b e r i n t o s c n o ( r7 ) .
* /
2006-01-14 19:31:29 +03:00
A7 1 0 ( l d r i p , [ l r , #- 4 ] @ get SWI instruction )
A7 1 0 ( a n d i p , i p , #0x0f000000 @ check for SWI )
A7 1 0 ( t e q i p , #0x0f000000 )
A7 1 0 ( b n e . L a r m 7 1 0 b u g )
2006-01-14 19:36:12 +03:00
2006-01-14 19:31:29 +03:00
# elif d e f i n e d ( C O N F I G _ A R M _ T H U M B )
2006-01-14 19:36:12 +03:00
/* Legacy ABI only, possibly thumb mode. */
2005-04-26 18:19:24 +04:00
tst r8 , #P S R _ T _ B I T @ t h i s i s S P S R f r o m s a v e _ u s e r _ r e g s
addne s c n o , r7 , #_ _ N R _ S Y S C A L L _ B A S E @ p u t O S n u m b e r i n
ldreq s c n o , [ l r , #- 4 ]
2006-01-14 19:36:12 +03:00
2005-04-26 18:19:24 +04:00
# else
2006-01-14 19:36:12 +03:00
/* Legacy ABI only. */
2005-04-26 18:19:24 +04:00
ldr s c n o , [ l r , #- 4 ] @ get SWI instruction
2006-01-14 19:31:29 +03:00
A7 1 0 ( a n d i p , s c n o , #0x0f000000 @ check for SWI )
A7 1 0 ( t e q i p , #0x0f000000 )
A7 1 0 ( b n e . L a r m 7 1 0 b u g )
2006-01-14 19:36:12 +03:00
2005-04-26 18:19:24 +04:00
# endif
2005-04-17 02:20:36 +04:00
# ifdef C O N F I G _ A L I G N M E N T _ T R A P
ldr i p , _ _ c r _ a l i g n m e n t
ldr i p , [ i p ]
mcr p15 , 0 , i p , c1 , c0 @ update control register
# endif
2005-04-26 18:18:26 +04:00
enable_ i r q
2005-04-17 02:20:36 +04:00
get_ t h r e a d _ i n f o t s k
2006-01-14 19:36:12 +03:00
adr t b l , s y s _ c a l l _ t a b l e @ load syscall table pointer
2005-04-17 02:20:36 +04:00
ldr i p , [ t s k , #T I _ F L A G S ] @ c h e c k f o r s y s c a l l t r a c i n g
2006-01-14 19:36:12 +03:00
# if d e f i n e d ( C O N F I G _ O A B I _ C O M P A T )
/ *
* If t h e s w i a r g u m e n t i s z e r o , t h i s i s a n E A B I c a l l a n d w e d o n o t h i n g .
*
* If t h i s i s a n o l d A B I c a l l , g e t t h e s y s c a l l n u m b e r i n t o s c n o a n d
* get t h e o l d A B I s y s c a l l t a b l e a d d r e s s .
* /
bics r10 , r10 , #0xff000000
eorne s c n o , r10 , #_ _ N R _ O A B I _ S Y S C A L L _ B A S E
ldrne t b l , =sys_oabi_call_table
# elif ! d e f i n e d ( C O N F I G _ A E A B I )
2005-04-17 02:20:36 +04:00
bic s c n o , s c n o , #0xff000000 @ mask off SWI op-code
2005-04-26 18:19:24 +04:00
eor s c n o , s c n o , #_ _ N R _ S Y S C A L L _ B A S E @ c h e c k O S n u m b e r
2006-01-14 19:31:29 +03:00
# endif
2006-01-14 19:36:12 +03:00
2006-01-14 19:31:29 +03:00
stmdb s p ! , { r4 , r5 } @ push fifth and sixth args
2005-04-17 02:20:36 +04:00
tst i p , #_ T I F _ S Y S C A L L _ T R A C E @ a r e w e t r a c i n g s y s c a l l s ?
bne _ _ s y s _ t r a c e
cmp s c n o , #N R _ s y s c a l l s @ c h e c k u p p e r s y s c a l l l i m i t
2009-07-24 15:32:54 +04:00
adr l r , B S Y M ( r e t _ f a s t _ s y s c a l l ) @ return address
2005-04-17 02:20:36 +04:00
ldrcc p c , [ t b l , s c n o , l s l #2 ] @ call sys_* routine
add r1 , s p , #S _ O F F
2 : mov w h y , #0 @ no longer a real syscall
2005-04-26 18:19:24 +04:00
cmp s c n o , #( _ _ A R M _ N R _ B A S E - _ _ N R _ S Y S C A L L _ B A S E )
eor r0 , s c n o , #_ _ N R _ S Y S C A L L _ B A S E @ p u t O S n u m b e r b a c k
2005-04-17 02:20:36 +04:00
bcs a r m _ s y s c a l l
b s y s _ n i _ s y s c a l l @ not private func
2008-08-28 14:22:32 +04:00
ENDPROC( v e c t o r _ s w i )
2005-04-17 02:20:36 +04:00
/ *
* This i s t h e r e a l l y s l o w p a t h . W e ' r e g o i n g t o b e d o i n g
* context s w i t c h e s , a n d w a i t i n g f o r o u r p a r e n t t o r e s p o n d .
* /
__sys_trace :
2006-01-14 22:30:04 +03:00
mov r2 , s c n o
2005-04-17 02:20:36 +04:00
add r1 , s p , #S _ O F F
mov r0 , #0 @ trace entry [IP = 0]
bl s y s c a l l _ t r a c e
2009-07-24 15:32:54 +04:00
adr l r , B S Y M ( _ _ s y s _ t r a c e _ r e t u r n ) @ return address
2006-01-14 22:30:04 +03:00
mov s c n o , r0 @ syscall number (possibly new)
2005-04-17 02:20:36 +04:00
add r1 , s p , #S _ R 0 + S _ O F F @ pointer to regs
cmp s c n o , #N R _ s y s c a l l s @ c h e c k u p p e r s y s c a l l l i m i t
ldmccia r1 , { r0 - r3 } @ have to reload r0 - r3
ldrcc p c , [ t b l , s c n o , l s l #2 ] @ call sys_* routine
b 2 b
__sys_trace_return :
str r0 , [ s p , #S _ R 0 + S _ O F F ] ! @ save returned r0
2006-01-14 22:30:04 +03:00
mov r2 , s c n o
2005-04-17 02:20:36 +04:00
mov r1 , s p
mov r0 , #1 @ trace exit [IP = 1]
bl s y s c a l l _ t r a c e
b r e t _ s l o w _ s y s c a l l
.align 5
# ifdef C O N F I G _ A L I G N M E N T _ T R A P
.type _ _ cr_ a l i g n m e n t , #o b j e c t
__cr_alignment :
.word cr_alignment
2006-01-14 19:36:12 +03:00
# endif
.ltorg
/ *
* This i s t h e s y s c a l l t a b l e d e c l a r a t i o n f o r n a t i v e A B I s y s c a l l s .
* With E A B I a c o u p l e s y s c a l l s a r e o b s o l e t e a n d d e f i n e d a s s y s _ n i _ s y s c a l l .
* /
# define A B I ( n a t i v e , c o m p a t ) n a t i v e
# ifdef C O N F I G _ A E A B I
# define O B S O L E T E ( s y s c a l l ) s y s _ n i _ s y s c a l l
# else
# define O B S O L E T E ( s y s c a l l ) s y s c a l l
2005-04-17 02:20:36 +04:00
# endif
.type sys_ c a l l _ t a b l e , #o b j e c t
ENTRY( s y s _ c a l l _ t a b l e )
# include " c a l l s . S "
2006-01-14 19:36:12 +03:00
# undef A B I
# undef O B S O L E T E
2005-04-17 02:20:36 +04:00
/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)
sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork
ENDPROC(sys_fork_wrapper)
sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
ENDPROC(sys_vfork_wrapper)
sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve
ENDPROC(sys_execve_wrapper)
sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
ENDPROC(sys_clone_wrapper)
sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)
sys_statfs64_wrapper:
		teq	r1, #88			@ compat size of struct statfs64
		moveq	r1, #84			@ adjust to native packed size
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)
sys_fstatfs64_wrapper:
		teq	r1, #88			@ compat size of struct statfs64
		moveq	r1, #84			@ adjust to native packed size
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif