/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	/* Default no-op: platforms without CONFIG_NEED_RET_TO_USER need no
	 * machine-specific work before returning to user space. */
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"
.align 5
/ *
* This i s t h e f a s t s y s c a l l r e t u r n p a t h . W e d o a s l i t t l e a s
* possible h e r e , a n d t h i s i n c l u d e s s a v i n g r0 b a c k i n t o t h e S V C
* stack.
* /
ret_fast_syscall :
2009-02-16 13:42:09 +03:00
UNWIND( . f n s t a r t )
UNWIND( . c a n t u n w i n d )
2005-04-26 18:18:26 +04:00
disable_ i r q @ disable interrupts
2005-04-17 02:20:36 +04:00
ldr r1 , [ t s k , #T I _ F L A G S ]
tst r1 , #_ T I F _ W O R K _ M A S K
bne f a s t _ w o r k _ p e n d i n g
2010-12-23 03:52:44 +03:00
asm_ t r a c e _ h a r d i r q s _ o n
2005-04-26 18:20:34 +04:00
2007-02-17 00:16:32 +03:00
/* perform architecture specific actions before user return */
arch_ r e t _ t o _ u s e r r1 , l r
2013-03-29 01:54:40 +04:00
ct_ u s e r _ e n t e r
2007-02-17 00:16:32 +03:00
2009-07-24 15:32:54 +04:00
restore_ u s e r _ r e g s f a s t = 1 , o f f s e t = S _ O F F
2009-02-16 13:42:09 +03:00
UNWIND( . f n e n d )
2005-04-17 02:20:36 +04:00
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0				@ 0: all work done, return to user
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	asm_trace_hardirqs_on

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 *
 * For a kernel thread, r5 holds the thread function and r4 its argument;
 * for a user fork both were set to 0 by copy_thread().
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0				@ kernel thread?
	movne	r0, r4				@ yes: pass argument to it
	/*
	 * Use adrne (with BSYM for Thumb-2) rather than "movne lr, pc":
	 * mov lr, pc yields an lr without the Thumb bit, so a Thumb-2
	 * kernel would return from the thread function in ARM state at a
	 * misaligned address and crash.
	 */
	adrne	lr, BSYM(1f)
	movne	pc, r5				@ call the thread function
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
2006-01-19 15:57:01 +03:00
.equ NR_ s y s c a l l s ,0
# define C A L L ( x ) . e q u N R _ s y s c a l l s ,N R _ s y s c a l l s + 1
2005-04-17 02:20:36 +04:00
# include " c a l l s . S "
2012-09-07 21:18:25 +04:00
/ *
* Ensure t h a t t h e s y s t e m c a l l t a b l e i s e q u a l t o _ _ N R _ s y s c a l l s ,
* which i s t h e v a l u e t h e r e s t o f t h e s y s t e m s e e s
* /
.ifne NR_syscalls - _ _ NR_ s y s c a l l s
.error " _ _ NR_ s y s c a l l s i s n o t e q u a l t o t h e s i z e o f t h e s y s c a l l t a b l e "
.endif
2006-01-19 15:57:01 +03:00
# undef C A L L
# define C A L L ( x ) . l o n g x
2005-04-17 02:20:36 +04:00
2008-10-07 03:06:12 +04:00
#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

/* \rd = \rn with the Thumb bit cleared, rewound to the call site. */
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm
/*
 * Common mcount body.  Calls ftrace_trace_function unless it still points
 * at the stub; likewise dispatches to the graph caller when either graph
 * hook has been installed.
 */
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2			@ tracer installed?
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2			@ graph return hook set?
	bne	ftrace_graph_caller\suffix
	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2			@ graph entry hook set?
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit			@ nothing to trace

1:	mcount_get_lr	r1		@ lr of instrumented func
	mcount_adjust_addr	r0, lr	@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2			@ call the tracer
2:	mcount_exit
.endm
/*
 * Dynamic-ftrace entry: the bl/mov sites below are patched at runtime
 * (see arch/arm/kernel/ftrace.c), hence the exported labels.
 */
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1		@ lr of instrumented func
	mcount_adjust_addr	r0, lr	@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub		@ patched to the active tracer

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0			@ nop, patched to graph caller branch
#endif

	mcount_exit
.endm
/* Hand control to the function-graph tracer's return hooking code. */
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount  (pre-GCC-4.4 calling style; relies on the APCS frame pointer)
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]		@ caller's lr lives in the APCS frame
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]		@ restore the original lr...
	ldmia	sp!, {r0-r3, pc}	@ ...and return transparently
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	/* Tracing off: just undo the call and return. */
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

/* Drop the old-ABI helper macros so they can be redefined below. */
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
/*
 * __gnu_mcount_nc  (GCC 4.4+ calling style: caller did "push {lr}")
 */

.macro mcount_enter
/*
 * This pad compensates for the push {lr} at the call site.  Note that we are
 * unable to unwind through a function which does not otherwise save its lr.
 */
 UNWIND(.pad	#4)
	stmdb	sp!, {r0-r3, lr}
 UNWIND(.save	{r0-r3, lr})
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]		@ caller's lr: above our 5 saved regs
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}	@ ip gets the lr pushed at the call site
	mov	pc, ip			@ return, having popped both frames
.endm
ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
	/* Tracing off: pop the lr the call site pushed and return. */
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
UNWIND(.fnstart)
	__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
UNWIND(.fnstart)
	__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Jumped to when an instrumented function returns: ask ftrace for
	 * the real return address, then resume there. */
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif

	enable_irq
	ct_user_exit				@ context tracking: left user mode
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall			@ ARM-private syscall range
2:	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]			@ retry the faulting SWI
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b				@ no: treat as unimplemented
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall
.align 5
# ifdef C O N F I G _ A L I G N M E N T _ T R A P
.type _ _ cr_ a l i g n m e n t , #o b j e c t
__cr_alignment :
.word cr_alignment
2006-01-14 19:36:12 +03:00
# endif
.ltorg
/ *
* This i s t h e s y s c a l l t a b l e d e c l a r a t i o n f o r n a t i v e A B I s y s c a l l s .
* With E A B I a c o u p l e s y s c a l l s a r e o b s o l e t e a n d d e f i n e d a s s y s _ n i _ s y s c a l l .
* /
# define A B I ( n a t i v e , c o m p a t ) n a t i v e
# ifdef C O N F I G _ A E A B I
# define O B S O L E T E ( s y s c a l l ) s y s _ n i _ s y s c a l l
# else
# define O B S O L E T E ( s y s c a l l ) s y s c a l l
2005-04-17 02:20:36 +04:00
# endif
.type sys_ c a l l _ t a b l e , #o b j e c t
ENTRY( s y s _ c a l l _ t a b l e )
# include " c a l l s . S "
2006-01-14 19:36:12 +03:00
# undef A B I
# undef O B S O L E T E
/*============================================================================
 * Special system call wrappers
 */
@ syscall(2) indirection: r0 holds the real syscall number, so every
@ argument must be shifted down one register before dispatching.
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args: stack slots too
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]		@ tail-call the real handler
	b	sys_ni_syscall			@ out of range: not implemented
ENDPROC(sys_syscall)
sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF			@ r0 = pt_regs on the SVC stack
	mov	why, #0				@ prevent syscall restart handling:
						@ sigreturn never legitimately
						@ returns a restart-worthy error
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF			@ r0 = pt_regs on the SVC stack
	mov	why, #0				@ prevent syscall restart handling:
						@ rt_sigreturn never legitimately
						@ returns a restart-worthy error
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
sys_statfs64_wrapper:
	teq	r1, #88				@ user-passed struct size of 88?
	moveq	r1, #84				@ rewrite to 84 — NOTE(review):
						@ presumably the OABI-padded vs
						@ native sizeof(struct statfs64)
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)
sys_fstatfs64_wrapper:
	teq	r1, #88				@ user-passed struct size of 88?
	moveq	r1, #84				@ rewrite to 84 (same fixup as
						@ sys_statfs64_wrapper)
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK			@ offset representable in native
	moveq	r5, r5, lsr #PAGE_SHIFT - 12	@ pages? convert 4K units to pages
	streq	r5, [sp, #4]			@ sixth argument lives on the stack
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL			@ low bits set: cannot honour offset
	mov	pc, lr
#else
	str	r5, [sp, #4]			@ PAGE_SHIFT == 12: 4K units are pages
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
# ifdef C O N F I G _ O A B I _ C O M P A T
/*
 * These are syscalls with argument register differences
 */
sys_oabi_pread64:
	stmia	sp, {r3, r4}			@ move the r3/r4 pair into the
						@ stack argument slots
	b	sys_pread64
ENDPROC(sys_oabi_pread64)
sys_oabi_pwrite64:
	stmia	sp, {r3, r4}			@ move the r3/r4 pair into the
						@ stack argument slots
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)
2006-01-14 19:35:31 +03:00
sys_oabi_truncate64 :
mov r3 , r2
mov r2 , r1
b s y s _ t r u n c a t e 6 4
2008-08-28 14:22:32 +04:00
ENDPROC( s y s _ o a b i _ t r u n c a t e 6 4 )
sys_oabi_ftruncate64:
	mov	r3, r2				@ shift the argument pair up one
	mov	r2, r1				@ register for the EABI layout
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)
2006-01-14 19:35:31 +03:00
sys_oabi_readahead :
str r3 , [ s p ]
mov r3 , r2
mov r2 , r1
b s y s _ r e a d a h e a d
2008-08-28 14:22:32 +04:00
ENDPROC( s y s _ o a b i _ r e a d a h e a d )
2006-01-14 19:35:31 +03:00
2006-01-14 19:36:12 +03:00
/ *
* Let' s d e c l a r e a s e c o n d s y s c a l l t a b l e f o r o l d A B I b i n a r i e s
* using t h e c o m p a t i b i l i t y s y s c a l l e n t r i e s .
* /
# define A B I ( n a t i v e , c o m p a t ) c o m p a t
# define O B S O L E T E ( s y s c a l l ) s y s c a l l
.type sys_ o a b i _ c a l l _ t a b l e , #o b j e c t
ENTRY( s y s _ o a b i _ c a l l _ t a b l e )
# include " c a l l s . S "
# undef A B I
# undef O B S O L E T E
2006-01-14 19:35:31 +03:00
# endif