/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
.macro irq_handler
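	@ With CONFIG_MULTI_IRQ_HANDLER the platform installs its top-level
	@ dispatcher in the handle_arch_irq pointer; it is called with the
	@ pt_regs pointer in r0 and returns to the 9997 label below.
	@ Otherwise the default per-machine handler macro
	@ (arch_irq_handler_default) is used.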
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
.endm
	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
.endm
.macro dabt_helper
@
@ Call the processor-specific abort handler:
@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
.endm
#ifdef CONFIG_KPROBES
	.section	.kprobes.text, "ax", %progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
.endm
__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

	@
	@ common_invalid - generic code for failed exception (re-entrant version of handlers)
	@
common_invalid:
	zero_fp
	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)
2005-04-16 15:20:36 -07:00
/ *
* SVC m o d e h a n d l e r s
* /
2006-01-14 16:18:08 +00:00
# if d e f i n e d ( C O N F I G _ A E A B I ) & & ( _ _ L I N U X _ A R M _ A R C H _ _ > = 5 )
# define S P F I X ( c o d e . . . ) c o d e
# else
# define S P F I X ( c o d e . . . )
# endif
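
/*
 * Note: SPFIX() expands its argument only for EABI kernels built for
 * ARMv5 or later; those configurations must keep the SVC stack 64-bit
 * aligned (see the EABI note in the user mode handlers below), so the
 * entry code conditionally inserts 4 bytes of padding into the frame.
 */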

	.macro	svc_entry, stack_hole=0, trace=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
.endm

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM
	@ the PC will be pointing at the next instruction, and have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(__und_svc_finish)
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align 5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr	r8, .LCcralign)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

 ATRAP(	ldr	r8, [r8, #0])

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP( mcrne	p15, 0, r8, c1, c0, 0)

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if	\trace
#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
.endm
.macro kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
ARM_BE8(rev16	r0, r0)				@ little endian instruction
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	str	r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this threads thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop
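
	@ Dispatch on coprocessor number: in ARM state r8 = CP# << 8, so
	@ "r8, lsr #6" is CP# * 4, and adding it to pc (which reads two
	@ instructions ahead) selects one entry in the one-word-per-CP
	@ branch table below; the nop above fills the slot the pipelined
	@ pc skips.  The Thumb path already holds the plain CP# in r8 and
	@ shifts it left by 2 instead.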
	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	ret	lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align 5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
.endm
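
	@ kuser_pad pads a helper out to \size bytes: first to a 4-byte
	@ boundary with zero bytes, then with 0xe7fddef1 words (a permanently
	@ undefined instruction encoding), so a stray branch into the padding
	@ traps instead of falling through to the next helper.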
	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */
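
/*
 * Each helper below is padded out with kuser_pad to its fixed slot size
 * (64 bytes for __kuser_cmpxchg64, 32 bytes for the others) so that the
 * entry addresses noted next to each label remain stable.
 */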

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

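/*
 * Userspace calls this helper through its fixed address in the vector
 * page (see Documentation/arm/kernel_user_helpers.txt); the snippet
 * below is adapted from the change history for this helper:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * It returns zero (with the C flag set) if *ptr was atomically changed
 * from oldval to newval, and non-zero otherwise.
 */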
__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
@ sp = saved regs. r7 and r8 are clobbered.
@ 1b = first critical insn, 2b = last critical insn.
2011-06-25 15:44:20 +01:00
@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
2007-11-20 17:20:29 +01:00
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
2011-06-25 15:44:20 +01:00
	subs	r8, r4, r7
2007-11-20 17:20:29 +01:00
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
2014-06-30 16:29:12 +01:00
	ret	lr
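In C terms, kuser_cmpxchg32_fixup above boils down to a single unsigned range
check on the interrupted pc; a minimal sketch follows (hypothetical helper name
and signature -- only the 1b/2b labels and the saved-pc rewrite come from the
code above):

/* Sketch of the fixup's range check.
 * base     = user address of the first critical insn (label 1b)
 * span     = 2b - 1b, offset of the last critical insn from 1b
 * saved_pc = pointer to the saved user pc (the [sp, #S_PC] slot)
 */
static void cmpxchg32_fixup_sketch(unsigned long base, unsigned long span,
				   unsigned long *saved_pc)
{
	/* Unsigned compare implements "pc >= 1b && pc <= 2b" in one test. */
	if (*saved_pc - base <= span)
		*saved_pc = base;	/* restart the critical section */
}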
2007-11-20 17:20:29 +01:00
.previous
2006-02-08 21:19:37 +00:00
#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
2006-08-18 17:20:15 +01:00
	usr_ret	lr
2007-11-20 17:20:29 +01:00
#endif
2005-04-29 22:08:33 +01:00
#else
ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.
* __fixup_smp_on_up has been modified with support for the
THUMB2_KERNEL case. For THUMB2_KERNEL only, fixups are split
into halfwords in case of misalignment, since we can't rely on
unaligned accesses working before turning the MMU on.
No attempt is made to optimise the aligned case, since the
number of fixups is typically small, and it seems best to keep
the code as simple as possible.
* Add a rotate in the fixup_smp code in order to support
CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.
* Add an assembly-time sanity-check to ALT_UP() to ensure that
the content really is the right size (4 bytes).
(No check is done for ALT_SMP(). Possibly, this could be fixed
by splitting the two uses of ALT_SMP() (ALT_SMP...SMP_UP versus
ALT_SMP...SMP_UP_B) into two macros. In the first case,
ALT_SMP needs to expand to >= 4 bytes, not == 4.)
* smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
to macro limitations) has not been modified: the affected
instruction (mov) has no 16-bit encoding, so the correct
instruction size is satisfied in this case.
* A "mode" parameter has been added to smp_dmb:
smp_dmb arm @ assumes 4-byte instructions (for ARM code, e.g. kuser)
smp_dmb @ uses W() to ensure 4-byte instructions for ALT_SMP()
This avoids assembly failures due to use of W() inside smp_dmb,
when assembling pure-ARM code in the vectors page.
There might be a better way to achieve this.
* Kconfig: make SMP_ON_UP depend on
(!THUMB2_KERNEL || !BIG_ENDIAN) i.e., THUMB2_KERNEL is now
supported, but only if !BIG_ENDIAN (The fixup code for Thumb-2
currently assumes little-endian order.)
Tested using a single generic realview kernel on:
ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
ARM RealView PBX-A9 (SMP)
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
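The halfword splitting described above replaces one (possibly unaligned)
32-bit store with two naturally aligned 16-bit stores; a rough sketch of the
idea (hypothetical helper, not the actual fixup code):

/* Apply a 4-byte fixup as two halfword stores so no unaligned 32-bit
 * access is needed before the MMU (and alignment handling) is up.
 * Thumb-2 instructions are laid out as two halfwords in any case. */
static void apply_fixup_halfwords(unsigned short *dst, const unsigned short *src)
{
	dst[0] = src[0];
	dst[1] = src[1];
}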
2010-12-01 15:39:23 +01:00
	smp_dmb	arm
2007-11-20 17:20:29 +01:00
1:	ldrex	r3, [r2]
2005-04-29 22:08:33 +01:00
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
2007-11-20 17:20:29 +01:00
	teqeq	r3, #1
	beq	1b
2005-04-29 22:08:33 +01:00
	rsbs	r0, r3, #0
2007-11-20 17:20:29 +01:00
/* beware -- each __kuser slot must be 8 instructions max */
2010-09-04 10:47:48 +01:00
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)
2005-04-29 22:08:33 +01:00
#endif
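Both variants above implement the same user-visible contract: store newval and
return 0 if *ptr contained oldval, return non-zero otherwise. A hedged C
equivalent using a generic compiler builtin (illustration only; the real ABI
entry point is the fixed address 0xffff0fc0, as in the test program earlier):

/* Semantics of __kuser_cmpxchg expressed with GCC/clang's generic
 * atomic builtin.  The SEQ_CST ordering stands in for the dmb
 * barriers used by the SMP version. */
static int kernel_cmpxchg_equiv(int oldval, int newval, int *ptr)
{
	/* 0 on success, non-zero if *ptr did not contain oldval. */
	return !__atomic_compare_exchange_n(ptr, &oldval, newval,
					    0 /* strong */,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}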
2013-07-04 11:32:04 +01:00
	kuser_pad __kuser_cmpxchg, 32
2005-04-29 22:08:33 +01:00
__kuser_get_tls:				@ 0xffff0fe0
2010-07-05 14:53:10 +01:00
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
2006-08-18 17:20:15 +01:00
	usr_ret	lr
2010-07-05 14:53:10 +01:00
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
2013-07-04 11:32:04 +01:00
	kuser_pad __kuser_get_tls, 16
	.rep	3
2010-07-05 14:53:10 +01:00
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
2005-04-29 22:08:33 +01:00
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
	.globl	__kuser_helper_end
__kuser_helper_end:
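For completeness, a user-space view of the TLS helper and the version word, in
the same spirit as the cmpxchg test program above (the minimum-version
threshold below is an assumption for illustration; consult the kuser helpers
documentation for the exact values):

typedef unsigned int (__kuser_get_tls_t)(void);
#define __kuser_get_tls		(*(__kuser_get_tls_t *)0xffff0fe0)
#define kuser_helper_version	(*(int *)0xffff0ffc)

unsigned int read_tls_pointer(void)
{
	/* The version word counts the 32-byte helper slots provided by
	 * the kernel; check it before assuming a helper exists. */
	if (kuser_helper_version < 1)
		return 0;
	return __kuser_get_tls();
}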
2013-07-23 18:37:00 +01:00
#endif
2009-07-24 12:32:54 +01:00
 THUMB(	.thumb	)
2005-04-29 22:08:33 +01:00
2005-04-16 15:20:36 -07:00
/*
 * Vector stubs.
 *
2013-07-04 11:40:32 +01:00
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
2005-04-16 15:20:36 -07:00
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
2005-05-31 22:22:32 +01:00
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
2005-04-16 15:20:36 -07:00
 */
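As a rough C analogue of the dispatch step performed by each stub (names
invented for illustration), the handler is picked by indexing a 16-entry table
with the low four mode bits of the interrupted spsr:

/* Mirrors "and lr, lr, #0x0f" followed by the table load in the macro below. */
typedef void (*stub_handler_t)(void);

static stub_handler_t pick_handler(stub_handler_t table[16], unsigned long spsr)
{
	return table[spsr & 0x0f];	/* e.g. 0 -> USR_32 entry, 3 -> SVC_32 entry */
}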
2005-11-06 14:42:37 +00:00
	.macro	vector_stub, name, mode, correction=0
2005-04-16 15:20:36 -07:00
	.align	5
vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif
2005-05-31 22:22:32 +01:00
	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
2005-04-16 15:20:36 -07:00
	mrs	lr, spsr
2005-05-31 22:22:32 +01:00
	str	lr, [sp, #8]		@ save spsr
2005-04-16 15:20:36 -07:00
	@
2005-05-31 22:22:32 +01:00
	@ Prepare for SVC32 mode.  IRQs remain disabled.
2005-04-16 15:20:36 -07:00
	@
2005-05-31 22:22:32 +01:00
	mrs	r0, cpsr
2009-07-24 12:32:54 +01:00
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
2005-05-31 22:22:32 +01:00
	msr	spsr_cxsf, r0
2005-04-16 15:20:36 -07:00
2005-05-31 22:22:32 +01:00
	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
2009-07-24 12:32:54 +01:00
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
2005-11-06 14:42:37 +00:00
	mov	r0, sp
2009-07-24 12:32:54 +01:00
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
2005-05-31 22:22:32 +01:00
	movs	pc, lr			@ branch to handler in SVC mode
2008-08-28 11:22:32 +01:00
ENDPROC(vector_\name)
2009-07-24 12:32:52 +01:00
	.align	2
	@ handler addresses follow this label
1:
2005-04-16 15:20:36 -07:00
	.endm
2013-07-04 12:03:31 +01:00
	.section .stubs, "ax", %progbits
2005-04-16 15:20:36 -07:00
__stubs_start:
2013-07-04 11:40:32 +01:00
	@ This must be the first word
	.word	vector_swi
vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und
2005-04-16 15:20:36 -07:00
/*
 * Interrupt dispatcher
 */
2005-11-06 14:42:37 +00:00
	vector_stub	irq, IRQ_MODE, 4
2005-04-16 15:20:36 -07:00
.long __irq_usr @ 0 (USR_26 / USR_32)
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
.long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
.long __irq_svc @ 3 (SVC_26 / SVC_32)
.long __irq_invalid @ 4
.long __irq_invalid @ 5
.long __irq_invalid @ 6
.long __irq_invalid @ 7
.long __irq_invalid @ 8
.long __irq_invalid @ 9
.long __irq_invalid @ a
.long __irq_invalid @ b
.long __irq_invalid @ c
.long __irq_invalid @ d
.long __irq_invalid @ e
.long __irq_invalid @ f
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
2005-11-06 14:42:37 +00:00
	vector_stub	dabt, ABT_MODE, 8
2005-04-16 15:20:36 -07:00
.long __dabt_usr @ 0 (USR_26 / USR_32)
.long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
.long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
.long __dabt_svc @ 3 (SVC_26 / SVC_32)
.long __dabt_invalid @ 4
.long __dabt_invalid @ 5
.long __dabt_invalid @ 6
.long __dabt_invalid @ 7
.long __dabt_invalid @ 8
.long __dabt_invalid @ 9
.long __dabt_invalid @ a
.long __dabt_invalid @ b
.long __dabt_invalid @ c
.long __dabt_invalid @ d
.long __dabt_invalid @ e
.long __dabt_invalid @ f
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
2005-11-06 14:42:37 +00:00
	vector_stub	pabt, ABT_MODE, 4
2005-04-16 15:20:36 -07:00
.long __pabt_usr @ 0 (USR_26 / USR_32)
.long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
.long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
.long __pabt_svc @ 3 (SVC_26 / SVC_32)
.long __pabt_invalid @ 4
.long __pabt_invalid @ 5
.long __pabt_invalid @ 6
.long __pabt_invalid @ 7
.long __pabt_invalid @ 8
.long __pabt_invalid @ 9
.long __pabt_invalid @ a
.long __pabt_invalid @ b
.long __pabt_invalid @ c
.long __pabt_invalid @ d
.long __pabt_invalid @ e
.long __pabt_invalid @ f
/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
2005-11-06 14:42:37 +00:00
	vector_stub	und, UND_MODE
2005-04-16 15:20:36 -07:00
.long __und_usr @ 0 (USR_26 / USR_32)
.long __und_invalid @ 1 (FIQ_26 / FIQ_32)
.long __und_invalid @ 2 (IRQ_26 / IRQ_32)
.long __und_svc @ 3 (SVC_26 / SVC_32)
.long __und_invalid @ 4
.long __und_invalid @ 5
.long __und_invalid @ 6
.long __und_invalid @ 7
.long __und_invalid @ 8
.long __und_invalid @ 9
.long __und_invalid @ a
.long __und_invalid @ b
.long __und_invalid @ c
.long __und_invalid @ d
.long __und_invalid @ e
.long __und_invalid @ f
.align 5
2013-07-04 11:40:32 +01:00
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */
vector_addrexcptn:
	b	vector_addrexcptn
2005-04-16 15:20:36 -07:00
/*=============================================================================
2014-09-17 17:12:06 +01:00
 * FIQ "NMI" handler
2005-04-16 15:20:36 -07:00
 *-----------------------------------------------------------------------------
2014-09-17 17:12:06 +01:00
 * Handle a FIQ using the SVC stack, allowing FIQ to act like NMI on x86
 * systems.
2005-04-16 15:20:36 -07:00
 */
2014-09-17 17:12:06 +01:00
	vector_stub	fiq, FIQ_MODE, 4
.long __fiq_usr @ 0 (USR_26 / USR_32)
.long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
.long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
.long __fiq_svc @ 3 (SVC_26 / SVC_32)
.long __fiq_svc @ 4
.long __fiq_svc @ 5
.long __fiq_svc @ 6
.long __fiq_abt @ 7
.long __fiq_svc @ 8
.long __fiq_svc @ 9
.long __fiq_svc @ a
.long __fiq_svc @ b
.long __fiq_svc @ c
.long __fiq_svc @ d
.long __fiq_svc @ e
.long __fiq_svc @ f
2005-04-16 15:20:36 -07:00
2013-07-09 01:03:17 +01:00
.globl vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq
2013-07-04 12:03:31 +01:00
	.section .vectors, "ax", %progbits
2005-04-26 15:17:42 +01:00
__vectors_start:
2013-07-04 12:03:31 +01:00
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq
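This layout relies on the boot code copying the two sections a page apart, so
that the W(b) branches reach the stubs and the SWI slot loads vector_swi
through the word placed at the start of the stubs section. A sketch under
those assumptions (the end symbols and the copy site are assumed here, not
defined in this file):

#include <string.h>

extern char __vectors_start[], __vectors_end[];	/* end symbol assumed */
extern char __stubs_start[], __stubs_end[];	/* end symbol assumed */

static void copy_vector_pages_sketch(char *vectors_base)	/* e.g. 0xffff0000 */
{
	/* Vectors at offset 0, stubs at +0x1000 (0xffff1000). */
	memcpy(vectors_base, __vectors_start, __vectors_end - __vectors_start);
	memcpy(vectors_base + 0x1000, __stubs_start, __stubs_end - __stubs_start);
}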
2005-04-16 15:20:36 -07:00
	.data
	.globl	cr_alignment
cr_alignment:
	.space	4
2010-12-13 09:42:34 +01:00
#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif