2005-10-10 16:36:14 +04:00
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
# include < l i n u x / e r r n o . h >
powerpc/kernel: Switch to using MAX_ERRNO
Currently on powerpc we have our own #define for the highest (negative)
errno value, called _LAST_ERRNO. This is defined to be 516, for reasons
which are not clear.
The generic code, and x86, use MAX_ERRNO, which is defined to be 4095.
In particular seccomp uses MAX_ERRNO to restrict the value that a
seccomp filter can return.
Currently with the mismatch between _LAST_ERRNO and MAX_ERRNO, a seccomp
tracer wanting to return 600, expecting it to be seen as an error, would
instead find on powerpc that userspace sees a successful syscall with a
return value of 600.
To avoid this inconsistency, switch powerpc to use MAX_ERRNO.
We are somewhat confident that generic syscalls that can return a
non-error value above negative MAX_ERRNO have already been updated to
use force_successful_syscall_return().
I have also checked all the powerpc specific syscalls, and believe that
none of them expect to return a non-error value between -MAX_ERRNO and
-516. So this change should be safe ...
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Kees Cook <keescook@chromium.org>
2015-07-23 13:21:01 +03:00
# include < l i n u x / e r r . h >
2005-10-10 16:36:14 +04:00
# include < l i n u x / s y s . h >
# include < l i n u x / t h r e a d s . h >
# include < a s m / r e g . h >
# include < a s m / p a g e . h >
# include < a s m / m m u . h >
# include < a s m / c p u t a b l e . h >
# include < a s m / t h r e a d _ i n f o . h >
# include < a s m / p p c _ a s m . h >
# include < a s m / a s m - o f f s e t s . h >
# include < a s m / u n i s t d . h >
2008-06-21 22:17:27 +04:00
# include < a s m / f t r a c e . h >
2010-11-18 18:06:17 +03:00
# include < a s m / p t r a c e . h >
2016-01-14 07:33:46 +03:00
# include < a s m / e x p o r t . h >
2005-10-10 16:36:14 +04:00
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE,
 * so it needs a two-instruction lis/ori load rather than a single li.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	/* Machine check: save the DSRR pair into the exception frame,
	 * then continue through the debug and critical save paths. */
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	/* Debug interrupt: save the CSRR pair as well. */
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	/* Save the MAS TLB-miss state, which a critical interrupt may
	 * have interrupted mid-update. */
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	/* Save the interrupted SRR pair too, since taking another
	 * exception would clobber it. */
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack
	 * and set the limit to protect the thread_info
	 * struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif
#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	/* 40x stashes r10/r11 and the SRR pair in fixed low-memory
	 * scratch words; copy them into the exception frame now. */
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack
	 * and set the limit to protect the thread_info
	 * struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3			/* index 8-byte per-cpu slot */
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9, r9)
	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
.globl transfer_to_handler_cont
transfer_to_handler_cont :
3 :
mflr r9
lwz r11 ,0 ( r9 ) / * v i r t u a l a d d r e s s o f h a n d l e r * /
lwz r9 ,4 ( r9 ) / * w h e r e t o g o w h e n d o n e * /
2009-06-17 21:43:59 +04:00
# ifdef C O N F I G _ T R A C E _ I R Q F L A G S
lis r12 ,r e e n a b l e _ m m u @h
ori r12 ,r12 ,r e e n a b l e _ m m u @l
mtspr S P R N _ S R R 0 ,r12
mtspr S P R N _ S R R 1 ,r10
SYNC
RFI
reenable_mmu : /* re-enable mmu so we can */
mfmsr r10
lwz r12 ,_ M S R ( r1 )
xor r10 ,r10 ,r12
andi. r10 ,r10 ,M S R _ E E / * D i d E E c h a n g e ? * /
beq 1 f
2011-11-10 20:04:17 +04:00
/ *
* The t r a c e _ h a r d i r q s _ o f f w i l l u s e C A L L E R _ A D D R 0 a n d C A L L E R _ A D D R 1 .
* If f r o m u s e r m o d e t h e r e i s o n l y o n e s t a c k f r a m e o n t h e s t a c k , a n d
* accessing C A L L E R _ A D D R 1 w i l l c a u s e o o p s . S o w e n e e d c r e a t e a d u m m y
* stack f r a m e t o m a k e t r a c e _ h a r d i r q s _ o f f h a p p y .
2012-04-10 11:21:35 +04:00
*
* This i s h a n d y b e c a u s e w e a l s o n e e d t o s a v e a b u n c h o f G P R s ,
* r3 c a n b e d i f f e r e n t f r o m G P R 3 ( r1 ) a t t h i s p o i n t , r9 a n d r11
* contains t h e o l d M S R a n d h a n d l e r a d d r e s s r e s p e c t i v e l y ,
* r4 & r5 c a n c o n t a i n p a g e f a u l t a r g u m e n t s t h a t n e e d t o b e p a s s e d
* along a s w e l l . r12 , C C R , C T R , X E R e t c . . . a r e l e f t c l o b b e r e d a s
* they a r e n ' t u s e f u l p a s t t h i s p o i n t ( a r e n ' t s y s c a l l a r g u m e n t s ) ,
* the r e s t i s r e s t o r e d f r o m t h e e x c e p t i o n f r a m e .
2011-11-10 20:04:17 +04:00
* /
2012-04-10 11:21:35 +04:00
stwu r1 ,- 3 2 ( r1 )
stw r9 ,8 ( r1 )
stw r11 ,1 2 ( r1 )
stw r3 ,1 6 ( r1 )
stw r4 ,2 0 ( r1 )
stw r5 ,2 4 ( r1 )
2011-11-10 20:04:17 +04:00
bl t r a c e _ h a r d i r q s _ o f f
2012-04-10 11:21:35 +04:00
lwz r5 ,2 4 ( r1 )
lwz r4 ,2 0 ( r1 )
lwz r3 ,1 6 ( r1 )
lwz r11 ,1 2 ( r1 )
lwz r9 ,8 ( r1 )
addi r1 ,r1 ,3 2
2009-06-17 21:43:59 +04:00
lwz r0 ,G P R 0 ( r1 )
lwz r6 ,G P R 6 ( r1 )
lwz r7 ,G P R 7 ( r1 )
lwz r8 ,G P R 8 ( r1 )
1 : mtctr r11
mtlr r9
bctr / * j u m p t o h a n d l e r * /
# else / * C O N F I G _ T R A C E _ I R Q F L A G S * /
2005-10-10 16:36:14 +04:00
mtspr S P R N _ S R R 0 ,r11
mtspr S P R N _ S R R 1 ,r10
mtlr r9
SYNC
RFI / * j u m p t o h a n d l e r , e n a b l e M M U * /
2009-06-17 21:43:59 +04:00
# endif / * C O N F I G _ T R A C E _ I R Q F L A G S * /
2005-10-10 16:36:14 +04:00
#if defined (CONFIG_6xx) || defined(CONFIG_E500)
	/* Interrupted a napping CPU: clear the flag and resume via the
	 * power-save restore path. */
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	/* Switch to the emergency init_thread_union stack. */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
.globl ret_from_syscall
ret_from_syscall :
mr r6 ,r3
2012-07-05 08:41:35 +04:00
CURRENT_ T H R E A D _ I N F O ( r12 , r1 )
2005-10-10 16:36:14 +04:00
/* disable interrupts so current_thread_info()->flags can't change */
[PATCH] syscall entry/exit revamp
This cleanup patch speeds up the null syscall path on ppc64 by about 3%,
and brings the ppc32 and ppc64 code slightly closer together.
The ppc64 code was checking current_thread_info()->flags twice in the
syscall exit path; once for TIF_SYSCALL_T_OR_A before disabling
interrupts, and then again for TIF_SIGPENDING|TIF_NEED_RESCHED etc after
disabling interrupts. Now we do the same as ppc32 -- check the flags
only once in the fast path, and re-enable interrupts if necessary in the
ptrace case.
The patch abolishes the 'syscall_noerror' member of struct thread_info
and replaces it with a TIF_NOERROR bit in the flags, which is handled in
the slow path. This shortens the syscall entry code, which no longer
needs to clear syscall_noerror.
The patch adds a TIF_SAVE_NVGPRS flag which causes the syscall exit slow
path to save the non-volatile GPRs into a signal frame. This removes the
need for the assembly wrappers around sys_sigsuspend(),
sys_rt_sigsuspend(), et al which existed solely to save those registers
in advance. It also means I don't have to add new wrappers for ppoll()
and pselect(), which is what I was supposed to be doing when I got
distracted into this...
Finally, it unifies the ppc64 and ppc32 methods of handling syscall exit
directly into a signal handler (as required by sigsuspend et al) by
introducing a TIF_RESTOREALL flag which causes _all_ the registers to be
reloaded from the pt_regs by taking the ret_from_exception path, instead
of the normal syscall exit path which stomps on the callee-saved GPRs.
It appears to pass an LTP test run on ppc64, and passes basic testing on
ppc32 too. Brief tests of ptrace functionality with strace and gdb also
appear OK. I wouldn't send it to Linus for 2.6.15 just yet though :)
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-15 21:52:18 +03:00
LOAD_ M S R _ K E R N E L ( r10 ,M S R _ K E R N E L ) / * d o e s n ' t i n c l u d e M S R _ E E * /
2009-06-17 21:43:59 +04:00
/* Note: We don't bother telling lockdep about it */
2005-10-10 16:36:14 +04:00
SYNC
MTMSRD( r10 )
lwz r9 ,T I _ F L A G S ( r12 )
powerpc/kernel: Switch to using MAX_ERRNO
Currently on powerpc we have our own #define for the highest (negative)
errno value, called _LAST_ERRNO. This is defined to be 516, for reasons
which are not clear.
The generic code, and x86, use MAX_ERRNO, which is defined to be 4095.
In particular seccomp uses MAX_ERRNO to restrict the value that a
seccomp filter can return.
Currently with the mismatch between _LAST_ERRNO and MAX_ERRNO, a seccomp
tracer wanting to return 600, expecting it to be seen as an error, would
instead find on powerpc that userspace sees a successful syscall with a
return value of 600.
To avoid this inconsistency, switch powerpc to use MAX_ERRNO.
We are somewhat confident that generic syscalls that can return a
non-error value above negative MAX_ERRNO have already been updated to
use force_successful_syscall_return().
I have also checked all the powerpc specific syscalls, and believe that
none of them expect to return a non-error value between -MAX_ERRNO and
-516. So this change should be safe ...
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Kees Cook <keescook@chromium.org>
2015-07-23 13:21:01 +03:00
li r8 ,- M A X _ E R R N O
2015-01-15 04:01:42 +03:00
andi. r0 ,r9 ,( _ T I F _ S Y S C A L L _ D O T R A C E | _ T I F _ S I N G L E S T E P | _ T I F _ U S E R _ W O R K _ M A S K | _ T I F _ P E R S Y S C A L L _ M A S K )
2005-10-10 16:36:14 +04:00
bne- s y s c a l l _ e x i t _ w o r k
[PATCH] syscall entry/exit revamp
This cleanup patch speeds up the null syscall path on ppc64 by about 3%,
and brings the ppc32 and ppc64 code slightly closer together.
The ppc64 code was checking current_thread_info()->flags twice in the
syscall exit path; once for TIF_SYSCALL_T_OR_A before disabling
interrupts, and then again for TIF_SIGPENDING|TIF_NEED_RESCHED etc after
disabling interrupts. Now we do the same as ppc32 -- check the flags
only once in the fast path, and re-enable interrupts if necessary in the
ptrace case.
The patch abolishes the 'syscall_noerror' member of struct thread_info
and replaces it with a TIF_NOERROR bit in the flags, which is handled in
the slow path. This shortens the syscall entry code, which no longer
needs to clear syscall_noerror.
The patch adds a TIF_SAVE_NVGPRS flag which causes the syscall exit slow
path to save the non-volatile GPRs into a signal frame. This removes the
need for the assembly wrappers around sys_sigsuspend(),
sys_rt_sigsuspend(), et al which existed solely to save those registers
in advance. It also means I don't have to add new wrappers for ppoll()
and pselect(), which is what I was supposed to be doing when I got
distracted into this...
Finally, it unifies the ppc64 and ppc32 methods of handling syscall exit
directly into a signal handler (as required by sigsuspend et al) by
introducing a TIF_RESTOREALL flag which causes _all_ the registers to be
reloaded from the pt_regs by taking the ret_from_exception path, instead
of the normal syscall exit path which stomps on the callee-saved GPRs.
It appears to pass an LTP test run on ppc64, and passes basic testing on
ppc32 too. Brief tests of ptrace functionality with strace and gdb also
appear OK. I wouldn't send it to Linus for 2.6.15 just yet though :)
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-15 21:52:18 +03:00
cmplw 0 ,r3 ,r8
blt+ s y s c a l l _ e x i t _ c o n t
lwz r11 ,_ C C R ( r1 ) / * L o a d C R * /
neg r3 ,r3
oris r11 ,r11 ,0 x10 0 0 / * S e t S O b i t i n C R * /
stw r11 ,_ C C R ( r1 )
2005-10-10 16:36:14 +04:00
syscall_exit_cont :
2009-06-17 21:43:59 +04:00
lwz r8 ,_ M S R ( r1 )
# ifdef C O N F I G _ T R A C E _ I R Q F L A G S
/ * If w e a r e g o i n g t o r e t u r n f r o m t h e s y s c a l l w i t h i n t e r r u p t s
* off, w e t r a c e t h a t h e r e . I t s h o u l d n ' t h a p p e n t h o u g h b u t w e
* want t o c a t c h t h e b u g g e r i f i t d o e s r i g h t ?
* /
andi. r10 ,r8 ,M S R _ E E
bne+ 1 f
stw r3 ,G P R 3 ( r1 )
bl t r a c e _ h a r d i r q s _ o f f
lwz r3 ,G P R 3 ( r1 )
1 :
# endif / * C O N F I G _ T R A C E _ I R Q F L A G S * /
2005-10-10 16:36:14 +04:00
# if d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E )
2008-04-10 01:15:40 +04:00
/ * If t h e p r o c e s s h a s i t s o w n D B C R 0 v a l u e , l o a d i t u p . T h e i n t e r n a l
debug m o d e b i t t e l l s u s t h a t d b c r0 s h o u l d b e l o a d e d . * /
2005-10-10 16:36:14 +04:00
lwz r0 ,T H R E A D + T H R E A D _ D B C R 0 ( r2 )
2008-07-25 23:27:33 +04:00
andis. r10 ,r0 ,D B C R 0 _ I D M @h
2005-10-10 16:36:14 +04:00
bnel- l o a d _ d b c r0
# endif
2007-10-31 08:42:19 +03:00
# ifdef C O N F I G _ 4 4 x
2010-03-05 13:43:12 +03:00
BEGIN_ M M U _ F T R _ S E C T I O N
2007-10-31 08:42:19 +03:00
lis r4 ,i c a c h e _ 4 4 x _ n e e d _ f l u s h @ha
lwz r5 ,i c a c h e _ 4 4 x _ n e e d _ f l u s h @l(r4)
cmplwi c r0 ,r5 ,0
bne- 2 f
1 :
2010-03-05 13:43:12 +03:00
END_ M M U _ F T R _ S E C T I O N _ I F C L R ( M M U _ F T R _ T Y P E _ 4 7 x )
2007-10-31 08:42:19 +03:00
# endif / * C O N F I G _ 4 4 x * /
2007-11-10 01:17:49 +03:00
BEGIN_ F T R _ S E C T I O N
lwarx r7 ,0 ,r1
END_ F T R _ S E C T I O N _ I F S E T ( C P U _ F T R _ N E E D _ P A I R E D _ S T W C X )
2005-10-10 16:36:14 +04:00
stwcx. r0 ,0 ,r1 / * t o c l e a r t h e r e s e r v a t i o n * /
2016-05-17 09:33:46 +03:00
# ifdef C O N F I G _ V I R T _ C P U _ A C C O U N T I N G _ N A T I V E
andi. r4 ,r8 ,M S R _ P R
beq 3 f
CURRENT_ T H R E A D _ I N F O ( r4 , r1 )
ACCOUNT_ C P U _ U S E R _ E X I T ( r4 , r5 , r7 )
3 :
# endif
2005-10-10 16:36:14 +04:00
lwz r4 ,_ L I N K ( r1 )
lwz r5 ,_ C C R ( r1 )
mtlr r4
mtcr r5
lwz r7 ,_ N I P ( r1 )
FIX_ S R R 1 ( r8 , r0 )
lwz r2 ,G P R 2 ( r1 )
lwz r1 ,G P R 1 ( r1 )
mtspr S P R N _ S R R 0 ,r7
mtspr S P R N _ S R R 1 ,r8
SYNC
RFI
2007-10-31 08:42:19 +03:00
# ifdef C O N F I G _ 4 4 x
2 : li r7 ,0
iccci r0 ,r0
stw r7 ,i c a c h e _ 4 4 x _ n e e d _ f l u s h @l(r4)
b 1 b
# endif / * C O N F I G _ 4 4 x * /
2005-10-10 16:36:14 +04:00
66 : li r3 ,- E N O S Y S
b r e t _ f r o m _ s y s c a l l
.globl ret_from_fork
ret_from_fork :
REST_ N V G P R S ( r1 )
bl s c h e d u l e _ t a i l
li r3 ,0
b r e t _ f r o m _ s y s c a l l
2012-09-13 02:32:42 +04:00
.globl ret_from_kernel_thread
ret_from_kernel_thread :
REST_ N V G P R S ( r1 )
bl s c h e d u l e _ t a i l
mtlr r14
mr r3 ,r15
PPC4 4 0 E P _ E R R 4 2
blrl
li r3 ,0
2012-08-31 23:48:05 +04:00
b r e t _ f r o m _ s y s c a l l
2005-10-10 16:36:14 +04:00
/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont
syscall_exit_work :
[PATCH] syscall entry/exit revamp
This cleanup patch speeds up the null syscall path on ppc64 by about 3%,
and brings the ppc32 and ppc64 code slightly closer together.
The ppc64 code was checking current_thread_info()->flags twice in the
syscall exit path; once for TIF_SYSCALL_T_OR_A before disabling
interrupts, and then again for TIF_SIGPENDING|TIF_NEED_RESCHED etc after
disabling interrupts. Now we do the same as ppc32 -- check the flags
only once in the fast path, and re-enable interrupts if necessary in the
ptrace case.
The patch abolishes the 'syscall_noerror' member of struct thread_info
and replaces it with a TIF_NOERROR bit in the flags, which is handled in
the slow path. This shortens the syscall entry code, which no longer
needs to clear syscall_noerror.
The patch adds a TIF_SAVE_NVGPRS flag which causes the syscall exit slow
path to save the non-volatile GPRs into a signal frame. This removes the
need for the assembly wrappers around sys_sigsuspend(),
sys_rt_sigsuspend(), et al which existed solely to save those registers
in advance. It also means I don't have to add new wrappers for ppoll()
and pselect(), which is what I was supposed to be doing when I got
distracted into this...
Finally, it unifies the ppc64 and ppc32 methods of handling syscall exit
directly into a signal handler (as required by sigsuspend et al) by
introducing a TIF_RESTOREALL flag which causes _all_ the registers to be
reloaded from the pt_regs by taking the ret_from_exception path, instead
of the normal syscall exit path which stomps on the callee-saved GPRs.
It appears to pass an LTP test run on ppc64, and passes basic testing on
ppc32 too. Brief tests of ptrace functionality with strace and gdb also
appear OK. I wouldn't send it to Linus for 2.6.15 just yet though :)
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-15 21:52:18 +03:00
andi. r0 ,r9 ,_ T I F _ R E S T O R E A L L
2006-03-08 05:24:22 +03:00
beq+ 0 f
REST_ N V G P R S ( r1 )
b 2 f
0 : cmplw 0 ,r3 ,r8
[PATCH] syscall entry/exit revamp
This cleanup patch speeds up the null syscall path on ppc64 by about 3%,
and brings the ppc32 and ppc64 code slightly closer together.
The ppc64 code was checking current_thread_info()->flags twice in the
syscall exit path; once for TIF_SYSCALL_T_OR_A before disabling
interrupts, and then again for TIF_SIGPENDING|TIF_NEED_RESCHED etc after
disabling interrupts. Now we do the same as ppc32 -- check the flags
only once in the fast path, and re-enable interrupts if necessary in the
ptrace case.
The patch abolishes the 'syscall_noerror' member of struct thread_info
and replaces it with a TIF_NOERROR bit in the flags, which is handled in
the slow path. This shortens the syscall entry code, which no longer
needs to clear syscall_noerror.
The patch adds a TIF_SAVE_NVGPRS flag which causes the syscall exit slow
path to save the non-volatile GPRs into a signal frame. This removes the
need for the assembly wrappers around sys_sigsuspend(),
sys_rt_sigsuspend(), et al which existed solely to save those registers
in advance. It also means I don't have to add new wrappers for ppoll()
and pselect(), which is what I was supposed to be doing when I got
distracted into this...
Finally, it unifies the ppc64 and ppc32 methods of handling syscall exit
directly into a signal handler (as required by sigsuspend et al) by
introducing a TIF_RESTOREALL flag which causes _all_ the registers to be
reloaded from the pt_regs by taking the ret_from_exception path, instead
of the normal syscall exit path which stomps on the callee-saved GPRs.
It appears to pass an LTP test run on ppc64, and passes basic testing on
ppc32 too. Brief tests of ptrace functionality with strace and gdb also
appear OK. I wouldn't send it to Linus for 2.6.15 just yet though :)
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-15 21:52:18 +03:00
blt+ 1 f
andi. r0 ,r9 ,_ T I F _ N O E R R O R
bne- 1 f
lwz r11 ,_ C C R ( r1 ) / * L o a d C R * /
neg r3 ,r3
oris r11 ,r11 ,0 x10 0 0 / * S e t S O b i t i n C R * /
stw r11 ,_ C C R ( r1 )
1 : stw r6 ,R E S U L T ( r1 ) / * S a v e r e s u l t * /
2005-10-10 16:36:14 +04:00
stw r3 ,G P R 3 ( r1 ) / * U p d a t e r e t u r n v a l u e * /
[PATCH] syscall entry/exit revamp
This cleanup patch speeds up the null syscall path on ppc64 by about 3%,
and brings the ppc32 and ppc64 code slightly closer together.
The ppc64 code was checking current_thread_info()->flags twice in the
syscall exit path; once for TIF_SYSCALL_T_OR_A before disabling
interrupts, and then again for TIF_SIGPENDING|TIF_NEED_RESCHED etc after
disabling interrupts. Now we do the same as ppc32 -- check the flags
only once in the fast path, and re-enable interrupts if necessary in the
ptrace case.
The patch abolishes the 'syscall_noerror' member of struct thread_info
and replaces it with a TIF_NOERROR bit in the flags, which is handled in
the slow path. This shortens the syscall entry code, which no longer
needs to clear syscall_noerror.
The patch adds a TIF_SAVE_NVGPRS flag which causes the syscall exit slow
path to save the non-volatile GPRs into a signal frame. This removes the
need for the assembly wrappers around sys_sigsuspend(),
sys_rt_sigsuspend(), et al which existed solely to save those registers
in advance. It also means I don't have to add new wrappers for ppoll()
and pselect(), which is what I was supposed to be doing when I got
distracted into this...
Finally, it unifies the ppc64 and ppc32 methods of handling syscall exit
directly into a signal handler (as required by sigsuspend et al) by
introducing a TIF_RESTOREALL flag which causes _all_ the registers to be
reloaded from the pt_regs by taking the ret_from_exception path, instead
of the normal syscall exit path which stomps on the callee-saved GPRs.
It appears to pass an LTP test run on ppc64, and passes basic testing on
ppc32 too. Brief tests of ptrace functionality with strace and gdb also
appear OK. I wouldn't send it to Linus for 2.6.15 just yet though :)
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-15 21:52:18 +03:00
2 : andi. r0 ,r9 ,( _ T I F _ P E R S Y S C A L L _ M A S K )
beq 4 f
2006-03-08 05:24:22 +03:00
/* Clear per-syscall TIF flags if any are set. */
[PATCH] syscall entry/exit revamp
This cleanup patch speeds up the null syscall path on ppc64 by about 3%,
and brings the ppc32 and ppc64 code slightly closer together.
The ppc64 code was checking current_thread_info()->flags twice in the
syscall exit path; once for TIF_SYSCALL_T_OR_A before disabling
interrupts, and then again for TIF_SIGPENDING|TIF_NEED_RESCHED etc after
disabling interrupts. Now we do the same as ppc32 -- check the flags
only once in the fast path, and re-enable interrupts if necessary in the
ptrace case.
The patch abolishes the 'syscall_noerror' member of struct thread_info
and replaces it with a TIF_NOERROR bit in the flags, which is handled in
the slow path. This shortens the syscall entry code, which no longer
needs to clear syscall_noerror.
The patch adds a TIF_SAVE_NVGPRS flag which causes the syscall exit slow
path to save the non-volatile GPRs into a signal frame. This removes the
need for the assembly wrappers around sys_sigsuspend(),
sys_rt_sigsuspend(), et al which existed solely to save those registers
in advance. It also means I don't have to add new wrappers for ppoll()
and pselect(), which is what I was supposed to be doing when I got
distracted into this...
Finally, it unifies the ppc64 and ppc32 methods of handling syscall exit
directly into a signal handler (as required by sigsuspend et al) by
introducing a TIF_RESTOREALL flag which causes _all_ the registers to be
reloaded from the pt_regs by taking the ret_from_exception path, instead
of the normal syscall exit path which stomps on the callee-saved GPRs.
It appears to pass an LTP test run on ppc64, and passes basic testing on
ppc32 too. Brief tests of ptrace functionality with strace and gdb also
appear OK. I wouldn't send it to Linus for 2.6.15 just yet though :)
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-15 21:52:18 +03:00
li r11 ,_ T I F _ P E R S Y S C A L L _ M A S K
addi r12 ,r12 ,T I _ F L A G S
3 : lwarx r8 ,0 ,r12
andc r8 ,r8 ,r11
# ifdef C O N F I G _ I B M 4 0 5 _ E R R 7 7
dcbt 0 ,r12
# endif
stwcx. r8 ,0 ,r12
bne- 3 b
subi r12 ,r12 ,T I _ F L A G S
4 : /* Anything which requires enabling interrupts? */
2015-01-15 04:01:42 +03:00
andi. r0 ,r9 ,( _ T I F _ S Y S C A L L _ D O T R A C E | _ T I F _ S I N G L E S T E P )
2006-03-08 05:24:22 +03:00
beq r e t _ f r o m _ e x c e p t
2009-06-17 21:43:59 +04:00
/ * Re- e n a b l e i n t e r r u p t s . T h e r e i s n o n e e d t o t r a c e t h a t w i t h
* lockdep a s w e a r e s u p p o s e d t o h a v e I R Q s o n a t t h i s p o i n t
* /
2006-03-08 05:24:22 +03:00
ori r10 ,r10 ,M S R _ E E
SYNC
MTMSRD( r10 )
[PATCH] syscall entry/exit revamp
This cleanup patch speeds up the null syscall path on ppc64 by about 3%,
and brings the ppc32 and ppc64 code slightly closer together.
The ppc64 code was checking current_thread_info()->flags twice in the
syscall exit path; once for TIF_SYSCALL_T_OR_A before disabling
interrupts, and then again for TIF_SIGPENDING|TIF_NEED_RESCHED etc after
disabling interrupts. Now we do the same as ppc32 -- check the flags
only once in the fast path, and re-enable interrupts if necessary in the
ptrace case.
The patch abolishes the 'syscall_noerror' member of struct thread_info
and replaces it with a TIF_NOERROR bit in the flags, which is handled in
the slow path. This shortens the syscall entry code, which no longer
needs to clear syscall_noerror.
The patch adds a TIF_SAVE_NVGPRS flag which causes the syscall exit slow
path to save the non-volatile GPRs into a signal frame. This removes the
need for the assembly wrappers around sys_sigsuspend(),
sys_rt_sigsuspend(), et al which existed solely to save those registers
in advance. It also means I don't have to add new wrappers for ppoll()
and pselect(), which is what I was supposed to be doing when I got
distracted into this...
Finally, it unifies the ppc64 and ppc32 methods of handling syscall exit
directly into a signal handler (as required by sigsuspend et al) by
introducing a TIF_RESTOREALL flag which causes _all_ the registers to be
reloaded from the pt_regs by taking the ret_from_exception path, instead
of the normal syscall exit path which stomps on the callee-saved GPRs.
It appears to pass an LTP test run on ppc64, and passes basic testing on
ppc32 too. Brief tests of ptrace functionality with strace and gdb also
appear OK. I wouldn't send it to Linus for 2.6.15 just yet though :)
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-15 21:52:18 +03:00
/* Save NVGPRS if they're not saved already */
2005-10-28 16:45:25 +04:00
lwz r4 ,_ T R A P ( r1 )
2005-10-10 16:36:14 +04:00
andi. r4 ,r4 ,1
[PATCH] syscall entry/exit revamp
This cleanup patch speeds up the null syscall path on ppc64 by about 3%,
and brings the ppc32 and ppc64 code slightly closer together.
The ppc64 code was checking current_thread_info()->flags twice in the
syscall exit path; once for TIF_SYSCALL_T_OR_A before disabling
interrupts, and then again for TIF_SIGPENDING|TIF_NEED_RESCHED etc after
disabling interrupts. Now we do the same as ppc32 -- check the flags
only once in the fast path, and re-enable interrupts if necessary in the
ptrace case.
The patch abolishes the 'syscall_noerror' member of struct thread_info
and replaces it with a TIF_NOERROR bit in the flags, which is handled in
the slow path. This shortens the syscall entry code, which no longer
needs to clear syscall_noerror.
The patch adds a TIF_SAVE_NVGPRS flag which causes the syscall exit slow
path to save the non-volatile GPRs into a signal frame. This removes the
need for the assembly wrappers around sys_sigsuspend(),
sys_rt_sigsuspend(), et al which existed solely to save those registers
in advance. It also means I don't have to add new wrappers for ppoll()
and pselect(), which is what I was supposed to be doing when I got
distracted into this...
Finally, it unifies the ppc64 and ppc32 methods of handling syscall exit
directly into a signal handler (as required by sigsuspend et al) by
introducing a TIF_RESTOREALL flag which causes _all_ the registers to be
reloaded from the pt_regs by taking the ret_from_exception path, instead
of the normal syscall exit path which stomps on the callee-saved GPRs.
It appears to pass an LTP test run on ppc64, and passes basic testing on
ppc32 too. Brief tests of ptrace functionality with strace and gdb also
appear OK. I wouldn't send it to Linus for 2.6.15 just yet though :)
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-15 21:52:18 +03:00
beq 5 f
2005-10-10 16:36:14 +04:00
SAVE_ N V G P R S ( r1 )
li r4 ,0 x c00
2005-10-28 16:45:25 +04:00
stw r4 ,_ T R A P ( r1 )
2006-03-08 05:24:22 +03:00
5 :
2005-10-10 16:36:14 +04:00
addi r3 ,r1 ,S T A C K _ F R A M E _ O V E R H E A D
bl d o _ s y s c a l l _ t r a c e _ l e a v e
2006-03-08 05:24:22 +03:00
b r e t _ f r o m _ e x c e p t _ f u l l
2005-10-10 16:36:14 +04:00
/ *
[PATCH] syscall entry/exit revamp
This cleanup patch speeds up the null syscall path on ppc64 by about 3%,
and brings the ppc32 and ppc64 code slightly closer together.
The ppc64 code was checking current_thread_info()->flags twice in the
syscall exit path; once for TIF_SYSCALL_T_OR_A before disabling
interrupts, and then again for TIF_SIGPENDING|TIF_NEED_RESCHED etc after
disabling interrupts. Now we do the same as ppc32 -- check the flags
only once in the fast path, and re-enable interrupts if necessary in the
ptrace case.
The patch abolishes the 'syscall_noerror' member of struct thread_info
and replaces it with a TIF_NOERROR bit in the flags, which is handled in
the slow path. This shortens the syscall entry code, which no longer
needs to clear syscall_noerror.
The patch adds a TIF_SAVE_NVGPRS flag which causes the syscall exit slow
path to save the non-volatile GPRs into a signal frame. This removes the
need for the assembly wrappers around sys_sigsuspend(),
sys_rt_sigsuspend(), et al which existed solely to save those registers
in advance. It also means I don't have to add new wrappers for ppoll()
and pselect(), which is what I was supposed to be doing when I got
distracted into this...
Finally, it unifies the ppc64 and ppc32 methods of handling syscall exit
directly into a signal handler (as required by sigsuspend et al) by
introducing a TIF_RESTOREALL flag which causes _all_ the registers to be
reloaded from the pt_regs by taking the ret_from_exception path, instead
of the normal syscall exit path which stomps on the callee-saved GPRs.
It appears to pass an LTP test run on ppc64, and passes basic testing on
ppc32 too. Brief tests of ptrace functionality with strace and gdb also
appear OK. I wouldn't send it to Linus for 2.6.15 just yet though :)
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-15 21:52:18 +03:00
* The f o r k / c l o n e f u n c t i o n s n e e d t o c o p y t h e f u l l r e g i s t e r s e t i n t o
* the c h i l d p r o c e s s . T h e r e f o r e w e n e e d t o s a v e a l l t h e n o n v o l a t i l e
* registers ( r13 - r31 ) b e f o r e c a l l i n g t h e C c o d e .
2005-10-10 16:36:14 +04:00
* /
.globl ppc_fork
ppc_fork :
SAVE_ N V G P R S ( r1 )
2005-10-28 16:45:25 +04:00
lwz r0 ,_ T R A P ( r1 )
2005-10-10 16:36:14 +04:00
rlwinm r0 ,r0 ,0 ,0 ,3 0 / * c l e a r L S B t o i n d i c a t e f u l l * /
2005-10-28 16:45:25 +04:00
stw r0 ,_ T R A P ( r1 ) / * r e g i s t e r s e t s a v e d * /
2005-10-10 16:36:14 +04:00
b s y s _ f o r k
.globl ppc_vfork
ppc_vfork :
SAVE_ N V G P R S ( r1 )
2005-10-28 16:45:25 +04:00
lwz r0 ,_ T R A P ( r1 )
2005-10-10 16:36:14 +04:00
rlwinm r0 ,r0 ,0 ,0 ,3 0 / * c l e a r L S B t o i n d i c a t e f u l l * /
2005-10-28 16:45:25 +04:00
stw r0 ,_ T R A P ( r1 ) / * r e g i s t e r s e t s a v e d * /
2005-10-10 16:36:14 +04:00
b s y s _ v f o r k
.globl ppc_clone
ppc_clone :
SAVE_ N V G P R S ( r1 )
2005-10-28 16:45:25 +04:00
lwz r0 ,_ T R A P ( r1 )
2005-10-10 16:36:14 +04:00
rlwinm r0 ,r0 ,0 ,0 ,3 0 / * c l e a r L S B t o i n d i c a t e f u l l * /
2005-10-28 16:45:25 +04:00
stw r0 ,_ T R A P ( r1 ) / * r e g i s t e r s e t s a v e d * /
2005-10-10 16:36:14 +04:00
b s y s _ c l o n e
2006-03-08 05:24:22 +03:00
.globl ppc_swapcontext
ppc_swapcontext :
SAVE_ N V G P R S ( r1 )
lwz r0 ,_ T R A P ( r1 )
rlwinm r0 ,r0 ,0 ,0 ,3 0 / * c l e a r L S B t o i n d i c a t e f u l l * /
stw r0 ,_ T R A P ( r1 ) / * r e g i s t e r s e t s a v e d * /
b s y s _ s w a p c o n t e x t
2005-10-10 16:36:14 +04:00
/ *
* Top- l e v e l p a g e f a u l t h a n d l i n g .
* This i s i n a s s e m b l e r b e c a u s e i f d o _ p a g e _ f a u l t t e l l s u s t h a t
* it i s a b a d k e r n e l p a g e f a u l t , w e w a n t t o s a v e t h e n o n - v o l a t i l e
* registers b e f o r e c a l l i n g b a d _ p a g e _ f a u l t .
* /
.globl handle_page_fault
handle_page_fault :
stw r4 ,_ D A R ( r1 )
addi r3 ,r1 ,S T A C K _ F R A M E _ O V E R H E A D
bl d o _ p a g e _ f a u l t
cmpwi r3 ,0
beq+ r e t _ f r o m _ e x c e p t
SAVE_ N V G P R S ( r1 )
2005-10-28 16:45:25 +04:00
lwz r0 ,_ T R A P ( r1 )
2005-10-10 16:36:14 +04:00
clrrwi r0 ,r0 ,1
2005-10-28 16:45:25 +04:00
stw r0 ,_ T R A P ( r1 )
2005-10-10 16:36:14 +04:00
mr r5 ,r3
addi r3 ,r1 ,S T A C K _ F R A M E _ O V E R H E A D
lwz r4 ,_ D A R ( r1 )
bl b a d _ p a g e _ f a u l t
b r e t _ f r o m _ e x c e p t _ f u l l
/ *
* This r o u t i n e s w i t c h e s b e t w e e n t w o d i f f e r e n t t a s k s . T h e p r o c e s s
* state o f o n e i s s a v e d o n i t s k e r n e l s t a c k . T h e n t h e s t a t e
* of t h e o t h e r i s r e s t o r e d f r o m i t s k e r n e l s t a c k . T h e m e m o r y
* management h a r d w a r e i s u p d a t e d t o t h e s e c o n d p r o c e s s ' s s t a t e .
* Finally, w e c a n r e t u r n t o t h e s e c o n d p r o c e s s .
* On e n t r y , r3 p o i n t s t o t h e T H R E A D f o r t h e c u r r e n t t a s k , r4
* points t o t h e T H R E A D f o r t h e n e w t a s k .
*
* This r o u t i n e i s a l w a y s c a l l e d w i t h i n t e r r u p t s d i s a b l e d .
*
* Note : there a r e t w o w a y s t o g e t t o t h e " g o i n g o u t " p o r t i o n
* of t h i s c o d e ; either by coming in via the entry (_switch)
* or v i a " f o r k " w h i c h m u s t s e t u p a n e n v i r o n m e n t e q u i v a l e n t
* to t h e " _ s w i t c h " p a t h . I f y o u c h a n g e t h i s , y o u ' l l h a v e t o
* change t h e f o r k c o d e a l s o .
*
* The c o d e w h i c h c r e a t e s t h e n e w t a s k c o n t e x t i s i n ' c o p y _ t h r e a d '
* in a r c h / p p c / k e r n e l / p r o c e s s . c
* /
_ GLOBAL( _ s w i t c h )
stwu r1 ,- I N T _ F R A M E _ S I Z E ( r1 )
mflr r0
stw r0 ,I N T _ F R A M E _ S I Z E + 4 ( r1 )
/* r3-r12 are caller saved -- Cort */
SAVE_ N V G P R S ( r1 )
stw r0 ,_ N I P ( r1 ) / * R e t u r n t o s w i t c h c a l l e r * /
mfmsr r11
li r0 ,M S R _ F P / * D i s a b l e f l o a t i n g - p o i n t * /
# ifdef C O N F I G _ A L T I V E C
BEGIN_ F T R _ S E C T I O N
oris r0 ,r0 ,M S R _ V E C @h /* Disable altivec */
mfspr r12 ,S P R N _ V R S A V E / * s a v e v r s a v e r e g i s t e r v a l u e * /
stw r12 ,T H R E A D + T H R E A D _ V R S A V E ( r2 )
END_ F T R _ S E C T I O N _ I F S E T ( C P U _ F T R _ A L T I V E C )
# endif / * C O N F I G _ A L T I V E C * /
# ifdef C O N F I G _ S P E
2007-09-13 10:44:20 +04:00
BEGIN_ F T R _ S E C T I O N
2005-10-10 16:36:14 +04:00
oris r0 ,r0 ,M S R _ S P E @h /* Disable SPE */
mfspr r12 ,S P R N _ S P E F S C R / * s a v e s p e f s c r r e g i s t e r v a l u e * /
stw r12 ,T H R E A D + T H R E A D _ S P E F S C R ( r2 )
2007-09-13 10:44:20 +04:00
END_ F T R _ S E C T I O N _ I F S E T ( C P U _ F T R _ S P E )
2005-10-10 16:36:14 +04:00
# endif / * C O N F I G _ S P E * /
and. r0 ,r0 ,r11 / * F P o r a l t i v e c o r S P E e n a b l e d ? * /
beq+ 1 f
andc r11 ,r11 ,r0
MTMSRD( r11 )
isync
1 : stw r11 ,_ M S R ( r1 )
mfcr r10
stw r10 ,_ C C R ( r1 )
stw r1 ,K S P ( r3 ) / * S e t o l d s t a c k p o i n t e r * /
# ifdef C O N F I G _ S M P
/ * We n e e d a s y n c s o m e w h e r e h e r e t o m a k e s u r e t h a t i f t h e
* previous t a s k g e t s r e s c h e d u l e d o n a n o t h e r C P U , i t s e e s a l l
* stores i t h a s p e r f o r m e d o n t h i s o n e .
* /
sync
# endif / * C O N F I G _ S M P * /
tophys( r0 ,r4 )
2009-07-15 00:52:54 +04:00
mtspr S P R N _ S P R G _ T H R E A D ,r0 / * U p d a t e c u r r e n t T H R E A D p h y s a d d r * /
2005-10-10 16:36:14 +04:00
lwz r1 ,K S P ( r4 ) / * L o a d n e w s t a c k p o i n t e r * /
/* save the old current 'last' for return value */
mr r3 ,r2
addi r2 ,r4 ,- T H R E A D / * U p d a t e c u r r e n t * /
# ifdef C O N F I G _ A L T I V E C
BEGIN_ F T R _ S E C T I O N
lwz r0 ,T H R E A D + T H R E A D _ V R S A V E ( r2 )
mtspr S P R N _ V R S A V E ,r0 / * i f G 4 , r e s t o r e V R S A V E r e g * /
END_ F T R _ S E C T I O N _ I F S E T ( C P U _ F T R _ A L T I V E C )
# endif / * C O N F I G _ A L T I V E C * /
# ifdef C O N F I G _ S P E
2007-09-13 10:44:20 +04:00
BEGIN_ F T R _ S E C T I O N
2005-10-10 16:36:14 +04:00
lwz r0 ,T H R E A D + T H R E A D _ S P E F S C R ( r2 )
mtspr S P R N _ S P E F S C R ,r0 / * r e s t o r e S P E F S C R r e g * /
2007-09-13 10:44:20 +04:00
END_ F T R _ S E C T I O N _ I F S E T ( C P U _ F T R _ S P E )
2005-10-10 16:36:14 +04:00
# endif / * C O N F I G _ S P E * /
lwz r0 ,_ C C R ( r1 )
mtcrf 0 x F F ,r0
/* r3-r12 are destroyed -- Cort */
REST_ N V G P R S ( r1 )
lwz r4 ,_ N I P ( r1 ) / * R e t u r n t o _ s w i t c h c a l l e r i n n e w t a s k * /
mtlr r4
addi r1 ,r1 ,I N T _ F R A M E _ S I Z E
blr
.globl fast_exception_return
fast_exception_return :
# if ! ( d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E ) )
andi. r10 ,r9 ,M S R _ R I / * c h e c k f o r r e c o v e r a b l e i n t e r r u p t * /
beq 1 f / * i f n o t , w e ' v e g o t p r o b l e m s * /
# endif
2 : REST_ 4 G P R S ( 3 , r11 )
lwz r10 ,_ C C R ( r11 )
REST_ G P R ( 1 , r11 )
mtcr r10
lwz r10 ,_ L I N K ( r11 )
mtlr r10
REST_ G P R ( 1 0 , r11 )
mtspr S P R N _ S R R 1 ,r9
mtspr S P R N _ S R R 0 ,r12
REST_ G P R ( 9 , r11 )
REST_ G P R ( 1 2 , r11 )
lwz r11 ,G P R 1 1 ( r11 )
SYNC
RFI
# if ! ( d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E ) )
/* check if the exception happened in a restartable section */
1 : lis r3 ,e x c _ e x i t _ r e s t a r t _ e n d @ha
addi r3 ,r3 ,e x c _ e x i t _ r e s t a r t _ e n d @l
cmplw r12 ,r3
bge 3 f
lis r4 ,e x c _ e x i t _ r e s t a r t @ha
addi r4 ,r4 ,e x c _ e x i t _ r e s t a r t @l
cmplw r12 ,r4
blt 3 f
lis r3 ,f e e _ r e s t a r t s @ha
tophys( r3 ,r3 )
lwz r5 ,f e e _ r e s t a r t s @l(r3)
addi r5 ,r5 ,1
stw r5 ,f e e _ r e s t a r t s @l(r3)
mr r12 ,r4 / * r e s t a r t a t e x c _ e x i t _ r e s t a r t * /
b 2 b
2007-05-15 02:11:58 +04:00
.section .bss
.align 2
fee_restarts :
.space 4
.previous
2005-10-10 16:36:14 +04:00
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3 :
BEGIN_ F T R _ S E C T I O N
b 2 b
END_ F T R _ S E C T I O N _ I F S E T ( C P U _ F T R _ 6 0 1 )
li r10 ,- 1
2005-10-28 16:45:25 +04:00
stw r10 ,_ T R A P ( r11 )
2005-10-10 16:36:14 +04:00
addi r3 ,r1 ,S T A C K _ F R A M E _ O V E R H E A D
lis r10 ,M S R _ K E R N E L @h
ori r10 ,r10 ,M S R _ K E R N E L @l
bl t r a n s f e r _ t o _ h a n d l e r _ f u l l
.long nonrecoverable_exception
.long ret_from_except
# endif
.globl ret_from_except_full
ret_from_except_full :
REST_ N V G P R S ( r1 )
/* fall through */
.globl ret_from_except
ret_from_except :
/ * Hard- d i s a b l e i n t e r r u p t s s o t h a t c u r r e n t _ t h r e a d _ i n f o ( ) - > f l a g s
* can' t c h a n g e b e t w e e n w h e n w e t e s t i t a n d w h e n w e r e t u r n
* from t h e i n t e r r u p t . * /
2009-06-17 21:43:59 +04:00
/* Note: We don't bother telling lockdep about it */
2005-10-10 16:36:14 +04:00
LOAD_ M S R _ K E R N E L ( r10 ,M S R _ K E R N E L )
SYNC / * S o m e c h i p r e v s h a v e p r o b l e m s h e r e . . . * /
MTMSRD( r10 ) / * d i s a b l e i n t e r r u p t s * /
lwz r3 ,_ M S R ( r1 ) / * R e t u r n i n g t o u s e r m o d e ? * /
andi. r0 ,r3 ,M S R _ P R
beq r e s u m e _ k e r n e l
user_exc_return : /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
2012-07-05 08:41:35 +04:00
CURRENT_ T H R E A D _ I N F O ( r9 , r1 )
2005-10-10 16:36:14 +04:00
lwz r9 ,T I _ F L A G S ( r9 )
2008-04-28 11:30:37 +04:00
andi. r0 ,r9 ,_ T I F _ U S E R _ W O R K _ M A S K
2005-10-10 16:36:14 +04:00
bne d o _ w o r k
restore_user :
# if d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E )
2008-04-10 01:15:40 +04:00
/ * Check w h e t h e r t h i s p r o c e s s h a s i t s o w n D B C R 0 v a l u e . T h e i n t e r n a l
debug m o d e b i t t e l l s u s t h a t d b c r0 s h o u l d b e l o a d e d . * /
2005-10-10 16:36:14 +04:00
lwz r0 ,T H R E A D + T H R E A D _ D B C R 0 ( r2 )
2008-07-25 23:27:33 +04:00
andis. r10 ,r0 ,D B C R 0 _ I D M @h
2005-10-10 16:36:14 +04:00
bnel- l o a d _ d b c r0
# endif
2016-05-17 09:33:46 +03:00
# ifdef C O N F I G _ V I R T _ C P U _ A C C O U N T I N G _ N A T I V E
CURRENT_ T H R E A D _ I N F O ( r9 , r1 )
ACCOUNT_ C P U _ U S E R _ E X I T ( r9 , r10 , r11 )
# endif
2005-10-10 16:36:14 +04:00
b r e s t o r e
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel :
2012-09-17 03:54:30 +04:00
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
2012-07-05 08:41:35 +04:00
CURRENT_ T H R E A D _ I N F O ( r9 , r1 )
2012-09-17 03:54:30 +04:00
lwz r8 ,T I _ F L A G S ( r9 )
2013-05-31 05:20:02 +04:00
andis. r0 ,r8 ,_ T I F _ E M U L A T E _ S T A C K _ S T O R E @h
2012-09-17 03:54:30 +04:00
beq+ 1 f
addi r8 ,r1 ,I N T _ F R A M E _ S I Z E / * G e t t h e k p r o b e d f u n c t i o n e n t r y * /
lwz r3 ,G P R 1 ( r1 )
subi r3 ,r3 ,I N T _ F R A M E _ S I Z E / * d s t : A l l o c a t e a t r a m p o l i n e e x c e p t i o n f r a m e * /
mr r4 ,r1 / * s r c : c u r r e n t e x c e p t i o n f r a m e * /
mr r1 ,r3 / * R e r o u t e t h e t r a m p o l i n e f r a m e t o r1 * /
/* Copy from the original to the trampoline. */
li r5 ,I N T _ F R A M E _ S I Z E / 4 / * s i z e : I N T _ F R A M E _ S I Z E * /
li r6 ,0 / * s t a r t o f f s e t : 0 * /
mtctr r5
2 : lwzx r0 ,r6 ,r4
stwx r0 ,r6 ,r3
addi r6 ,r6 ,4
bdnz 2 b
/* Do real store operation to complete stwu */
lwz r5 ,G P R 1 ( r1 )
stw r8 ,0 ( r5 )
/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11 ,_ T I F _ E M U L A T E _ S T A C K _ S T O R E @h
addi r5 ,r9 ,T I _ F L A G S
0 : lwarx r8 ,0 ,r5
andc r8 ,r8 ,r11
# ifdef C O N F I G _ I B M 4 0 5 _ E R R 7 7
dcbt 0 ,r5
# endif
stwcx. r8 ,0 ,r5
bne- 0 b
1 :
# ifdef C O N F I G _ P R E E M P T
/* check current_thread_info->preempt_count */
2005-10-10 16:36:14 +04:00
lwz r0 ,T I _ P R E E M P T ( r9 )
cmpwi 0 ,r0 ,0 / * i f n o n - z e r o , j u s t r e s t o r e r e g s a n d r e t u r n * /
bne r e s t o r e
2012-09-17 03:54:30 +04:00
andi. r8 ,r8 ,_ T I F _ N E E D _ R E S C H E D
2005-10-10 16:36:14 +04:00
beq+ r e s t o r e
2012-09-17 03:54:30 +04:00
lwz r3 ,_ M S R ( r1 )
2005-10-10 16:36:14 +04:00
andi. r0 ,r3 ,M S R _ E E / * i n t e r r u p t s o f f ? * /
beq r e s t o r e / * d o n ' t s c h e d u l e i f s o * /
2009-06-17 21:43:59 +04:00
# ifdef C O N F I G _ T R A C E _ I R Q F L A G S
/ * Lockdep t h i n k s i r q s a r e e n a b l e d , w e n e e d t o c a l l
* preempt_ s c h e d u l e _ i r q w i t h I R Q s o f f , s o w e i n f o r m l o c k d e p
* now t h a t w e - d i d - t u r n t h e m o f f a l r e a d y
* /
bl t r a c e _ h a r d i r q s _ o f f
# endif
2005-10-10 16:36:14 +04:00
1 : bl p r e e m p t _ s c h e d u l e _ i r q
2012-07-05 08:41:35 +04:00
CURRENT_ T H R E A D _ I N F O ( r9 , r1 )
2005-10-10 16:36:14 +04:00
lwz r3 ,T I _ F L A G S ( r9 )
andi. r0 ,r3 ,_ T I F _ N E E D _ R E S C H E D
bne- 1 b
2009-06-17 21:43:59 +04:00
# ifdef C O N F I G _ T R A C E _ I R Q F L A G S
/ * And n o w , t o p r o p e r l y r e b a l a n c e t h e a b o v e , w e t e l l l o c k d e p t h e y
* are b e i n g t u r n e d b a c k o n , w h i c h w i l l h a p p e n w h e n w e r e t u r n
* /
bl t r a c e _ h a r d i r q s _ o n
# endif
2005-10-10 16:36:14 +04:00
# endif / * C O N F I G _ P R E E M P T * /
/* interrupts are hard-disabled at this point */
restore :
2007-10-31 08:42:19 +03:00
# ifdef C O N F I G _ 4 4 x
2010-03-05 13:43:12 +03:00
BEGIN_ M M U _ F T R _ S E C T I O N
b 1 f
END_ M M U _ F T R _ S E C T I O N _ I F S E T ( M M U _ F T R _ T Y P E _ 4 7 x )
2007-10-31 08:42:19 +03:00
lis r4 ,i c a c h e _ 4 4 x _ n e e d _ f l u s h @ha
lwz r5 ,i c a c h e _ 4 4 x _ n e e d _ f l u s h @l(r4)
cmplwi c r0 ,r5 ,0
beq+ 1 f
li r6 ,0
iccci r0 ,r0
stw r6 ,i c a c h e _ 4 4 x _ n e e d _ f l u s h @l(r4)
1 :
# endif / * C O N F I G _ 4 4 x * /
2009-06-17 21:43:59 +04:00
lwz r9 ,_ M S R ( r1 )
# ifdef C O N F I G _ T R A C E _ I R Q F L A G S
/ * Lockdep d o e s n ' t k n o w a b o u t t h e f a c t t h a t I R Q s a r e t e m p o r a r i l y t u r n e d
* off i n t h i s a s s e m b l y c o d e w h i l e p e e k i n g a t T I _ F L A G S ( ) a n d s u c h . H o w e v e r
* we n e e d t o i n f o r m i t i f t h e e x c e p t i o n t u r n e d i n t e r r u p t s o f f , a n d w e
* are a b o u t t o t r u n t h e m b a c k o n .
*
* The p r o b l e m h e r e s a d l y i s t h a t w e d o n ' t k n o w w h e t h e r t h e e x c e p t i o n s w a s
* one t h a t t u r n e d i n t e r r u p t s o f f o r n o t . S o w e a l w a y s t e l l l o c k d e p a b o u t
* turning t h e m o n h e r e w h e n w e g o b a c k t o w h e r e v e r w e c a m e f r o m w i t h E E
* on, e v e n i f t h a t m a y m e e n s o m e r e d u d a n t c a l l s b e i n g t r a c k e d . M a y b e l a t e r
* we c o u l d e n c o d e w h a t t h e e x c e p t i o n d i d s o m e w h e r e o r t e s t t h e e x c e p t i o n
* type i n t h e p t _ r e g s b u t t h a t s o u n d s o v e r k i l l
* /
andi. r10 ,r9 ,M S R _ E E
beq 1 f
2010-12-22 19:42:56 +03:00
/ *
* Since t h e f t r a c e i r q s o f f l a t e n c y t r a c e c h e c k s C A L L E R _ A D D R 1 ,
* which i s t h e s t a c k f r a m e h e r e , w e n e e d t o f o r c e a s t a c k f r a m e
* in c a s e w e c a m e f r o m u s e r s p a c e .
* /
stwu r1 ,- 3 2 ( r1 )
mflr r0
stw r0 ,4 ( r1 )
stwu r1 ,- 3 2 ( r1 )
2009-06-17 21:43:59 +04:00
bl t r a c e _ h a r d i r q s _ o n
2010-12-22 19:42:56 +03:00
lwz r1 ,0 ( r1 )
lwz r1 ,0 ( r1 )
2009-06-17 21:43:59 +04:00
lwz r9 ,_ M S R ( r1 )
1 :
# endif / * C O N F I G _ T R A C E _ I R Q F L A G S * /
2005-10-10 16:36:14 +04:00
lwz r0 ,G P R 0 ( r1 )
lwz r2 ,G P R 2 ( r1 )
REST_ 4 G P R S ( 3 , r1 )
REST_ 2 G P R S ( 7 , r1 )
lwz r10 ,_ X E R ( r1 )
lwz r11 ,_ C T R ( r1 )
mtspr S P R N _ X E R ,r10
mtctr r11
PPC4 0 5 _ E R R 7 7 ( 0 ,r1 )
2007-11-10 01:17:49 +03:00
BEGIN_ F T R _ S E C T I O N
lwarx r11 ,0 ,r1
END_ F T R _ S E C T I O N _ I F S E T ( C P U _ F T R _ N E E D _ P A I R E D _ S T W C X )
2005-10-10 16:36:14 +04:00
stwcx. r0 ,0 ,r1 / * t o c l e a r t h e r e s e r v a t i o n * /
# if ! ( d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E ) )
andi. r10 ,r9 ,M S R _ R I / * c h e c k i f t h i s e x c e p t i o n o c c u r r e d * /
beql n o n r e c o v e r a b l e / * a t a b a d p l a c e ( M S R : R I = 0 ) * /
lwz r10 ,_ C C R ( r1 )
lwz r11 ,_ L I N K ( r1 )
mtcrf 0 x F F ,r10
mtlr r11
/ *
* Once w e p u t v a l u e s i n S R R 0 a n d S R R 1 , w e a r e i n a s t a t e
* where e x c e p t i o n s a r e n o t r e c o v e r a b l e , s i n c e t a k i n g a n
* exception w i l l t r a s h S R R 0 a n d S R R 1 . T h e r e f o r e w e c l e a r t h e
* MSR : RI b i t t o i n d i c a t e t h i s . I f w e d o t a k e a n e x c e p t i o n ,
* we c a n ' t r e t u r n t o t h e p o i n t o f t h e e x c e p t i o n b u t w e
* can r e s t a r t t h e e x c e p t i o n e x i t p a t h a t t h e l a b e l
* exc_ e x i t _ r e s t a r t b e l o w . - - p a u l u s
* /
LOAD_ M S R _ K E R N E L ( r10 ,M S R _ K E R N E L & ~ M S R _ R I )
SYNC
MTMSRD( r10 ) / * c l e a r t h e R I b i t * /
.globl exc_exit_restart
exc_exit_restart :
lwz r12 ,_ N I P ( r1 )
FIX_ S R R 1 ( r9 ,r10 )
mtspr S P R N _ S R R 0 ,r12
mtspr S P R N _ S R R 1 ,r9
REST_ 4 G P R S ( 9 , r1 )
lwz r1 ,G P R 1 ( r1 )
.globl exc_exit_restart_end
exc_exit_restart_end :
SYNC
RFI
# else / * ! ( C O N F I G _ 4 x x | | C O N F I G _ B O O K E ) * /
/ *
* This i s a b i t d i f f e r e n t o n 4 x x / B o o k - E b e c a u s e i t d o e s n ' t h a v e
* the R I b i t i n t h e M S R .
* The T L B m i s s h a n d l e r c h e c k s i f w e h a v e i n t e r r u p t e d
* the e x c e p t i o n e x i t p a t h a n d r e s t a r t s i t i f s o
* ( well m a y b e o n e d a y i t w i l l . . . : ) .
* /
lwz r11 ,_ L I N K ( r1 )
mtlr r11
lwz r10 ,_ C C R ( r1 )
mtcrf 0 x f f ,r10
REST_ 2 G P R S ( 9 , r1 )
.globl exc_exit_restart
exc_exit_restart :
lwz r11 ,_ N I P ( r1 )
lwz r12 ,_ M S R ( r1 )
exc_exit_start :
mtspr S P R N _ S R R 0 ,r11
mtspr S P R N _ S R R 1 ,r12
REST_ 2 G P R S ( 1 1 , r1 )
lwz r1 ,G P R 1 ( r1 )
.globl exc_exit_restart_end
exc_exit_restart_end :
PPC4 0 5 _ E R R 7 7 _ S Y N C
rfi
b . / * p r e v e n t p r e f e t c h p a s t r f i * /
/ *
* Returning f r o m a c r i t i c a l i n t e r r u p t i n u s e r m o d e d o e s n ' t n e e d
* to b e a n y d i f f e r e n t f r o m a n o r m a l e x c e p t i o n . F o r a c r i t i c a l
* interrupt i n t h e k e r n e l , w e j u s t r e t u r n ( w i t h o u t c h e c k i n g f o r
* preemption) s i n c e t h e i n t e r r u p t m a y h a v e h a p p e n e d a t s o m e c r u c i a l
* place ( e . g . i n s i d e t h e T L B m i s s h a n d l e r ) , a n d b e c a u s e w e w i l l b e
* running w i t h r1 p o i n t i n g i n t o c r i t i c a l _ s t a c k , n o t t h e c u r r e n t
* process' s k e r n e l s t a c k ( a n d t h e r e f o r e c u r r e n t _ t h r e a d _ i n f o ( ) w i l l
* give t h e w r o n g a n s w e r ) .
* We h a v e t o r e s t o r e v a r i o u s S P R s t h a t m a y h a v e b e e n i n u s e a t t h e
* time o f t h e c r i t i c a l i n t e r r u p t .
*
* /
# ifdef C O N F I G _ 4 0 x
# define P P C _ 4 0 x _ T U R N _ O F F _ M S R _ D R \
/ * avoid a n y p o s s i b l e T L B m i s s e s h e r e b y t u r n i n g o f f M S R . D R , w e \
* assume t h e i n s t r u c t i o n s h e r e a r e m a p p e d b y a p i n n e d T L B e n t r y * / \
li r10 ,M S R _ I R ; \
mtmsr r10 ; \
isync; \
tophys( r1 , r1 ) ;
# else
# define P P C _ 4 0 x _ T U R N _ O F F _ M S R _ D R
# endif
# define R E T _ F R O M _ E X C _ L E V E L ( e x c _ l v l _ s r r0 , e x c _ l v l _ s r r1 , e x c _ l v l _ r f i ) \
REST_ N V G P R S ( r1 ) ; \
lwz r3 ,_ M S R ( r1 ) ; \
andi. r3 ,r3 ,M S R _ P R ; \
LOAD_ M S R _ K E R N E L ( r10 ,M S R _ K E R N E L ) ; \
bne u s e r _ e x c _ r e t u r n ; \
lwz r0 ,G P R 0 ( r1 ) ; \
lwz r2 ,G P R 2 ( r1 ) ; \
REST_ 4 G P R S ( 3 , r1 ) ; \
REST_ 2 G P R S ( 7 , r1 ) ; \
lwz r10 ,_ X E R ( r1 ) ; \
lwz r11 ,_ C T R ( r1 ) ; \
mtspr S P R N _ X E R ,r10 ; \
mtctr r11 ; \
PPC4 0 5 _ E R R 7 7 ( 0 ,r1 ) ; \
stwcx. r0 ,0 ,r1 ; /* to clear the reservation */ \
lwz r11 ,_ L I N K ( r1 ) ; \
mtlr r11 ; \
lwz r10 ,_ C C R ( r1 ) ; \
mtcrf 0 x f f ,r10 ; \
PPC_ 4 0 x _ T U R N _ O F F _ M S R _ D R ; \
lwz r9 ,_ D E A R ( r1 ) ; \
lwz r10 ,_ E S R ( r1 ) ; \
mtspr S P R N _ D E A R ,r9 ; \
mtspr S P R N _ E S R ,r10 ; \
lwz r11 ,_ N I P ( r1 ) ; \
lwz r12 ,_ M S R ( r1 ) ; \
mtspr e x c _ l v l _ s r r0 ,r11 ; \
mtspr e x c _ l v l _ s r r1 ,r12 ; \
lwz r9 ,G P R 9 ( r1 ) ; \
lwz r12 ,G P R 1 2 ( r1 ) ; \
lwz r10 ,G P R 1 0 ( r1 ) ; \
lwz r11 ,G P R 1 1 ( r1 ) ; \
lwz r1 ,G P R 1 ( r1 ) ; \
PPC4 0 5 _ E R R 7 7 _ S Y N C ; \
exc_ l v l _ r f i ; \
b . ; /* prevent prefetch past exc_lvl_rfi */
2008-04-30 14:23:21 +04:00
# define R E S T O R E _ x S R R ( e x c _ l v l _ s r r0 , e x c _ l v l _ s r r1 ) \
lwz r9 ,_ ## e x c _ l v l _ s r r 0 ( r1 ) ; \
lwz r10 ,_ ## e x c _ l v l _ s r r 1 ( r1 ) ; \
mtspr S P R N _ ## e x c _ l v l _ s r r 0 ,r9 ; \
mtspr S P R N _ ## e x c _ l v l _ s r r 1 ,r10 ;
2009-02-13 01:12:40 +03:00
# if d e f i n e d ( C O N F I G _ P P C _ B O O K 3 E _ M M U )
2008-04-30 14:23:21 +04:00
# ifdef C O N F I G _ P H Y S _ 6 4 B I T
# define R E S T O R E _ M A S 7 \
lwz r11 ,M A S 7 ( r1 ) ; \
mtspr S P R N _ M A S 7 ,r11 ;
# else
# define R E S T O R E _ M A S 7
# endif / * C O N F I G _ P H Y S _ 6 4 B I T * /
# define R E S T O R E _ M M U _ R E G S \
lwz r9 ,M A S 0 ( r1 ) ; \
lwz r10 ,M A S 1 ( r1 ) ; \
lwz r11 ,M A S 2 ( r1 ) ; \
mtspr S P R N _ M A S 0 ,r9 ; \
lwz r9 ,M A S 3 ( r1 ) ; \
mtspr S P R N _ M A S 1 ,r10 ; \
lwz r10 ,M A S 6 ( r1 ) ; \
mtspr S P R N _ M A S 2 ,r11 ; \
mtspr S P R N _ M A S 3 ,r9 ; \
mtspr S P R N _ M A S 6 ,r10 ; \
RESTORE_ M A S 7 ;
# elif d e f i n e d ( C O N F I G _ 4 4 x )
# define R E S T O R E _ M M U _ R E G S \
lwz r9 ,M M U C R ( r1 ) ; \
mtspr S P R N _ M M U C R ,r9 ;
# else
# define R E S T O R E _ M M U _ R E G S
# endif
#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
/*
 * Return path from a 40x critical exception.
 *
 * Restore the kernel stack limit and the normal SRR0/SRR1 pair from
 * their static save locations (saved_ksp_limit, crit_srr0/crit_srr1 --
 * on 40x these are kept in memory rather than in exception-frame
 * slots), then complete the return via RET_FROM_EXC_LEVEL, which
 * unwinds the frame through CSRR0/CSRR1 and exits with rfci.
 */
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);			/* SPRG_THREAD holds a phys addr here */
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */
2005-10-10 16:36:14 +04:00
#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
/*
 * Return path from a BookE critical exception.
 *
 * Restore the kernel stack limit saved in the exception frame, reload
 * the normal SRR0/SRR1 pair (which the critical exception may have
 * clobbered) and the MMU context registers, then unwind the frame via
 * RET_FROM_EXC_LEVEL using CSRR0/CSRR1 and exit with rfci.
 */
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
2008-04-30 14:23:21 +04:00
2005-10-10 16:36:14 +04:00
	.globl	ret_from_debug_exc
/*
 * Return path from a BookE debug exception.
 *
 * As for the critical-exception return, but additionally copy the
 * preempt count from the thread_info of the stack we are currently
 * running on into the task's thread_info (the debug handler appears to
 * run on a separate exception stack whose thread_info shadows
 * TI_PREEMPT -- NOTE(review): confirm against the debug exception
 * entry code).  Restore SRR0/SRR1 and CSRR0/CSRR1, then exit through
 * DSRR0/DSRR1 with rfdi.
 */
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)	/* r9 = task's thread_info */
	CURRENT_THREAD_INFO(r10, r1)		/* r10 = thread_info of this stack */
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
2005-10-10 16:36:14 +04:00
	.globl	ret_from_mcheck_exc
/*
 * Return path from a BookE machine-check exception.
 *
 * The machine check is the highest exception level here, so every
 * lower level's save/restore pair (SRR0/1, CSRR0/1, DSRR0/1) must be
 * reloaded from the frame along with the kernel stack limit and MMU
 * registers, before exiting through MCSRR0/MCSRR1 with rfmci.
 */
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 *
 * Clobbers r9 (SMP only), r10, r11.  Returns with blr.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	/* index into global_dbcr0 by CPU: each CPU owns one 8-byte slot
	 * (saved DBCR0 word + use-count word, see the stores below) */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)	/* save previous (global) DBCR0 */
	mtspr	SPRN_DBCR0,r0	/* install the task's DBCR0 */
	lwz	r10,4(r11)	/* bump the use count in the second word */
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	/* Per-CPU {saved DBCR0, count} pairs used above. */
	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
/*
 * Pending-work loop run on the way back to user mode: reschedule if
 * _TIF_NEED_RESCHED is set, otherwise deliver signals/notifications.
 * Loops via "recheck" until TI_FLAGS is clean, then falls through to
 * restore_user.  r9 holds the current TI_FLAGS on entry.
 */
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * neither. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1		/* low bit of _TRAP set => NVGPRs not yet saved */
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30	/* clear that bit now that they are saved */
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = pt_regs */
	mr	r4,r9				/* r4 = TI_FLAGS */
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck
/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 *
 * If the interrupted NIP (r12) lies inside the known-restartable
 * window [exc_exit_restart, exc_exit_restart_end), count the event in
 * ee_restarts and resume from exc_exit_restart instead.  Otherwise the
 * state is lost for good and the process is killed via
 * nonrecoverable_exception().
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f			/* NIP past the window: unrecoverable */
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f			/* NIP before the window: unrecoverable */
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1		/* account the restart */
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1		/* low bit set => NVGPRs not yet saved */
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	/* Counter of successful exception-exit restarts (see above). */
	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous
2005-10-10 16:36:14 +04:00
/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * enter_rtas: save LR and MSR in a fresh stack frame, fetch the RTAS
 * entry point and base from the 'rtas' descriptor, stash the physical
 * frame address in SPRG_RTAS, then rfi into RTAS with MSR_IR/MSR_DR
 * (instruction/data translation) cleared.  RTAS returns to label 1:
 * (passed as a physical address in r6), where the original MSR and
 * return address are recovered and a second rfi goes back to the
 * caller with translation re-enabled.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)	/* save caller's LR in the frame */
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)		/* r7 = physical address of our frame */
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)	/* preserve the MSR we entered with */
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)	/* MMU off for RTAS */
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7	/* flag "in RTAS" + frame for mcheck */
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* jump into RTAS, translation off */
1:	tophys(r9,r1)		/* back from RTAS, still untranslated */
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0	/* clear the "in RTAS" marker */
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	/* Landing pad for a machine check taken while inside RTAS. */
	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0		/* unconditional trap */
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */
2008-05-15 07:49:44 +04:00
2008-10-07 03:06:12 +04:00
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * With dynamic ftrace, _mcount itself is just a fast return stub:
 * the compiler-inserted call sites are patched at runtime to call
 * ftrace_caller instead when tracing is enabled.
 */
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * It is required that _mcount on PPC32 must preserve the
	 * link register. But we have r0 to play with. We use r0
	 * to push the return address back to the caller of mcount
	 * into the ctr register, restore the link register and
	 * then jump back using the ctr register.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

/*
 * Patched-in tracing entry.  The "bl ftrace_stub" at ftrace_call
 * (and "b ftrace_graph_stub" at ftrace_graph_call) are call sites
 * that ftrace rewrites at runtime to invoke the active tracer.
 */
_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE	/* r3 = address of the mcount call */
	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
/*
 * Static ftrace: every _mcount call goes through here and dispatches
 * indirectly to whatever ftrace_trace_function currently points at.
 */
_GLOBAL(mcount)
_GLOBAL(_mcount)
	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE	/* r3 = address of the mcount call */
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl			/* call the registered tracer */
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif
EXPORT_SYMBOL(_mcount)

/* Default no-op tracer target. */
_GLOBAL(ftrace_stub)
	blr
2009-02-12 03:10:57 +03:00
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the caller's return: hand the traced function's address and the
 * saved LR to prepare_ftrace_return(), which returns the address to
 * divert to (return_to_handler); that is written back over the LR slot
 * in the caller's stack frame.  Offsets 44/52 index into the frame laid
 * out by MCOUNT_SAVE_FRAME.
 */
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	lwz	r3,52(r1)

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR in the callers stack frame to this.
	 */
	stw	r3,52(r1)

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

/*
 * Runs in place of the traced function's real return: preserve the
 * (possibly 64-bit, r3:r4) return value, ask
 * ftrace_return_to_handler() for the original return address, then
 * restore the return value and branch back to it.
 */
_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */