/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 * A value that large no longer fits a single li immediate, so the
 * load needs a lis/ori pair on those platforms.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
#ifdef CONFIG_BOOKE
#include "head_booke.h"

/*
 * Entry stubs for the critical/debug/machine-check exception levels.
 * Each saves r8 in the level's scratch SPRG, switches to that level's
 * exception stack, copies the r10/r11 values the level-0 prolog stashed
 * below the frame into the new frame, then recovers r8.
 */
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	/* 40x keeps the critical-exception r10/r11 in fixed low-memory
	 * save slots (crit_r10/crit_r11) rather than on a stack. */
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	/* switch to the emergency (init task) stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	li	r11,0
	stb	r11,TI_SC_NOERR(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
.globl ret_from_syscall
ret_from_syscall :
# ifdef S H O W _ S Y S C A L L S
bl d o _ s h o w _ s y s c a l l _ e x i t
# endif
mr r6 ,r3
li r11 ,- _ L A S T _ E R R N O
cmplw 0 ,r3 ,r11
[PATCH] powerpc: Merge thread_info.h
Merge ppc32 and ppc64 versions of thread_info.h. They were pretty
similar already, the chief changes are:
- Instead of inline asm to implement current_thread_info(),
which needs to be different for ppc32 and ppc64, we use C with an
asm("r1") register variable. gcc turns it into the same asm as we
used to have for both platforms.
- We replace ppc32's 'local_flags' with the ppc64
'syscall_noerror' field. The noerror flag was in fact the only thing
in the local_flags field anyway, so the ppc64 approach is simpler, and
means we only need a load-immediate/store instead of load/mask/store
when clearing the flag.
- In readiness for 64k pages, when THREAD_SIZE will be less
than a page, ppc64 used kmalloc() rather than get_free_pages() to
allocate the kernel stack. With this patch we do the same for ppc32,
since there's no strong reason not to.
- For ppc64, we no longer export THREAD_SHIFT and THREAD_SIZE
via asm-offsets, thread_info.h can now be safely included in asm, as
on ppc32.
Built and booted on G4 Powerbook (ARCH=ppc and ARCH=powerpc) and
Power5 (ARCH=ppc64 and ARCH=powerpc).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-10-21 09:45:50 +04:00
rlwinm r12 ,r1 ,0 ,0 ,( 3 1 - T H R E A D _ S H I F T ) / * c u r r e n t _ t h r e a d _ i n f o ( ) * /
2005-10-10 16:36:14 +04:00
blt+ 3 0 f
[PATCH] powerpc: Merge thread_info.h
Merge ppc32 and ppc64 versions of thread_info.h. They were pretty
similar already, the chief changes are:
- Instead of inline asm to implement current_thread_info(),
which needs to be different for ppc32 and ppc64, we use C with an
asm("r1") register variable. gcc turns it into the same asm as we
used to have for both platforms.
- We replace ppc32's 'local_flags' with the ppc64
'syscall_noerror' field. The noerror flag was in fact the only thing
in the local_flags field anyway, so the ppc64 approach is simpler, and
means we only need a load-immediate/store instead of load/mask/store
when clearing the flag.
- In readiness for 64k pages, when THREAD_SIZE will be less
than a page, ppc64 used kmalloc() rather than get_free_pages() to
allocate the kernel stack. With this patch we do the same for ppc32,
since there's no strong reason not to.
- For ppc64, we no longer export THREAD_SHIFT and THREAD_SIZE
via asm-offsets, thread_info.h can now be safely included in asm, as
on ppc32.
Built and booted on G4 Powerbook (ARCH=ppc and ARCH=powerpc) and
Power5 (ARCH=ppc64 and ARCH=powerpc).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-10-21 09:45:50 +04:00
lbz r11 ,T I _ S C _ N O E R R ( r12 )
cmpwi r11 ,0
2005-10-10 16:36:14 +04:00
bne 3 0 f
neg r3 ,r3
lwz r10 ,_ C C R ( r1 ) / * S e t S O b i t i n C R * /
oris r10 ,r10 ,0 x10 0 0
stw r10 ,_ C C R ( r1 )
/* disable interrupts so current_thread_info()->flags can't change */
30 : LOAD_ M S R _ K E R N E L ( r10 ,M S R _ K E R N E L ) / * d o e s n ' t i n c l u d e M S R _ E E * /
SYNC
MTMSRD( r10 )
lwz r9 ,T I _ F L A G S ( r12 )
andi. r0 ,r9 ,( _ T I F _ S Y S C A L L _ T _ O R _ A | _ T I F _ S I G P E N D I N G | _ T I F _ N E E D _ R E S C H E D )
bne- s y s c a l l _ e x i t _ w o r k
syscall_exit_cont :
# if d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E )
/ * If t h e p r o c e s s h a s i t s o w n D B C R 0 v a l u e , l o a d i t u p . T h e s i n g l e
step b i t t e l l s u s t h a t d b c r0 s h o u l d b e l o a d e d . * /
lwz r0 ,T H R E A D + T H R E A D _ D B C R 0 ( r2 )
andis. r10 ,r0 ,D B C R 0 _ I C @h
bnel- l o a d _ d b c r0
# endif
stwcx. r0 ,0 ,r1 / * t o c l e a r t h e r e s e r v a t i o n * /
lwz r4 ,_ L I N K ( r1 )
lwz r5 ,_ C C R ( r1 )
mtlr r4
mtcr r5
lwz r7 ,_ N I P ( r1 )
lwz r8 ,_ M S R ( r1 )
FIX_ S R R 1 ( r8 , r0 )
lwz r2 ,G P R 2 ( r1 )
lwz r1 ,G P R 1 ( r1 )
mtspr S P R N _ S R R 0 ,r7
mtspr S P R N _ S R R 1 ,r8
SYNC
RFI
66 : li r3 ,- E N O S Y S
b r e t _ f r o m _ s y s c a l l
.globl ret_from_fork
ret_from_fork :
REST_ N V G P R S ( r1 )
bl s c h e d u l e _ t a i l
li r3 ,0
b r e t _ f r o m _ s y s c a l l
/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont
syscall_exit_work:
	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	beq	5f
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	lwz	r4,TRAP(r1)
	andi.	r4,r4,1
	beq	4f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
4:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	REST_NVGPRS(r1)
2:
	lwz	r3,GPR3(r1)
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)		/* disable interrupts again */
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
5:
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne	1f
	lwz	r5,_MSR(r1)
	andi.	r5,r5,MSR_PR
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	b	do_user_signal
1:
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	bl	schedule
	b	2b
#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	"-> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_sigsuspend

	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30
	stw	r0,TRAP(r1)
	b	sys_rt_sigsuspend

	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext
/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	clrrwi	r0,r0,1		/* clear LSB: full register set saved */
	stw	r0,TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr
.globl fast_exception_return
fast_exception_return :
# if ! ( d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E ) )
andi. r10 ,r9 ,M S R _ R I / * c h e c k f o r r e c o v e r a b l e i n t e r r u p t * /
beq 1 f / * i f n o t , w e ' v e g o t p r o b l e m s * /
# endif
2 : REST_ 4 G P R S ( 3 , r11 )
lwz r10 ,_ C C R ( r11 )
REST_ G P R ( 1 , r11 )
mtcr r10
lwz r10 ,_ L I N K ( r11 )
mtlr r10
REST_ G P R ( 1 0 , r11 )
mtspr S P R N _ S R R 1 ,r9
mtspr S P R N _ S R R 0 ,r12
REST_ G P R ( 9 , r11 )
REST_ G P R ( 1 2 , r11 )
lwz r11 ,G P R 1 1 ( r11 )
SYNC
RFI
# if ! ( d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E ) )
/* check if the exception happened in a restartable section */
1 : lis r3 ,e x c _ e x i t _ r e s t a r t _ e n d @ha
addi r3 ,r3 ,e x c _ e x i t _ r e s t a r t _ e n d @l
cmplw r12 ,r3
bge 3 f
lis r4 ,e x c _ e x i t _ r e s t a r t @ha
addi r4 ,r4 ,e x c _ e x i t _ r e s t a r t @l
cmplw r12 ,r4
blt 3 f
lis r3 ,f e e _ r e s t a r t s @ha
tophys( r3 ,r3 )
lwz r5 ,f e e _ r e s t a r t s @l(r3)
addi r5 ,r5 ,1
stw r5 ,f e e _ r e s t a r t s @l(r3)
mr r12 ,r4 / * r e s t a r t a t e x c _ e x i t _ r e s t a r t * /
b 2 b
.comm fee_ r e s t a r t s ,4
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3 :
BEGIN_ F T R _ S E C T I O N
b 2 b
END_ F T R _ S E C T I O N _ I F S E T ( C P U _ F T R _ 6 0 1 )
li r10 ,- 1
stw r10 ,T R A P ( r11 )
addi r3 ,r1 ,S T A C K _ F R A M E _ O V E R H E A D
lis r10 ,M S R _ K E R N E L @h
ori r10 ,r10 ,M S R _ K E R N E L @l
bl t r a n s f e r _ t o _ h a n d l e r _ f u l l
.long nonrecoverable_exception
.long ret_from_except
# endif
.globl sigreturn_exit
sigreturn_exit :
subi r1 ,r3 ,S T A C K _ F R A M E _ O V E R H E A D
[PATCH] powerpc: Merge thread_info.h
Merge ppc32 and ppc64 versions of thread_info.h. They were pretty
similar already, the chief changes are:
- Instead of inline asm to implement current_thread_info(),
which needs to be different for ppc32 and ppc64, we use C with an
asm("r1") register variable. gcc turns it into the same asm as we
used to have for both platforms.
- We replace ppc32's 'local_flags' with the ppc64
'syscall_noerror' field. The noerror flag was in fact the only thing
in the local_flags field anyway, so the ppc64 approach is simpler, and
means we only need a load-immediate/store instead of load/mask/store
when clearing the flag.
- In readiness for 64k pages, when THREAD_SIZE will be less
than a page, ppc64 used kmalloc() rather than get_free_pages() to
allocate the kernel stack. With this patch we do the same for ppc32,
since there's no strong reason not to.
- For ppc64, we no longer export THREAD_SHIFT and THREAD_SIZE
via asm-offsets, thread_info.h can now be safely included in asm, as
on ppc32.
Built and booted on G4 Powerbook (ARCH=ppc and ARCH=powerpc) and
Power5 (ARCH=ppc64 and ARCH=powerpc).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-10-21 09:45:50 +04:00
rlwinm r12 ,r1 ,0 ,0 ,( 3 1 - T H R E A D _ S H I F T ) / * c u r r e n t _ t h r e a d _ i n f o ( ) * /
2005-10-10 16:36:14 +04:00
lwz r9 ,T I _ F L A G S ( r12 )
andi. r0 ,r9 ,_ T I F _ S Y S C A L L _ T _ O R _ A
2005-10-20 03:24:18 +04:00
beq+ r e t _ f r o m _ e x c e p t _ f u l l
bl d o _ s y s c a l l _ t r a c e _ l e a v e
2005-10-10 16:36:14 +04:00
/* fall through */
.globl ret_from_except_full
ret_from_except_full :
REST_ N V G P R S ( r1 )
/* fall through */
.globl ret_from_except
ret_from_except :
/ * Hard- d i s a b l e i n t e r r u p t s s o t h a t c u r r e n t _ t h r e a d _ i n f o ( ) - > f l a g s
* can' t c h a n g e b e t w e e n w h e n w e t e s t i t a n d w h e n w e r e t u r n
* from t h e i n t e r r u p t . * /
LOAD_ M S R _ K E R N E L ( r10 ,M S R _ K E R N E L )
SYNC / * S o m e c h i p r e v s h a v e p r o b l e m s h e r e . . . * /
MTMSRD( r10 ) / * d i s a b l e i n t e r r u p t s * /
lwz r3 ,_ M S R ( r1 ) / * R e t u r n i n g t o u s e r m o d e ? * /
andi. r0 ,r3 ,M S R _ P R
beq r e s u m e _ k e r n e l
user_exc_return : /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
[PATCH] powerpc: Merge thread_info.h
Merge ppc32 and ppc64 versions of thread_info.h. They were pretty
similar already, the chief changes are:
- Instead of inline asm to implement current_thread_info(),
which needs to be different for ppc32 and ppc64, we use C with an
asm("r1") register variable. gcc turns it into the same asm as we
used to have for both platforms.
- We replace ppc32's 'local_flags' with the ppc64
'syscall_noerror' field. The noerror flag was in fact the only thing
in the local_flags field anyway, so the ppc64 approach is simpler, and
means we only need a load-immediate/store instead of load/mask/store
when clearing the flag.
- In readiness for 64k pages, when THREAD_SIZE will be less
than a page, ppc64 used kmalloc() rather than get_free_pages() to
allocate the kernel stack. With this patch we do the same for ppc32,
since there's no strong reason not to.
- For ppc64, we no longer export THREAD_SHIFT and THREAD_SIZE
via asm-offsets, thread_info.h can now be safely included in asm, as
on ppc32.
Built and booted on G4 Powerbook (ARCH=ppc and ARCH=powerpc) and
Power5 (ARCH=ppc64 and ARCH=powerpc).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-10-21 09:45:50 +04:00
rlwinm r9 ,r1 ,0 ,0 ,( 3 1 - T H R E A D _ S H I F T )
2005-10-10 16:36:14 +04:00
lwz r9 ,T I _ F L A G S ( r9 )
andi. r0 ,r9 ,( _ T I F _ S I G P E N D I N G | _ T I F _ N E E D _ R E S C H E D )
bne d o _ w o r k
restore_user :
# if d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E )
/ * Check w h e t h e r t h i s p r o c e s s h a s i t s o w n D B C R 0 v a l u e . T h e s i n g l e
step b i t t e l l s u s t h a t d b c r0 s h o u l d b e l o a d e d . * /
lwz r0 ,T H R E A D + T H R E A D _ D B C R 0 ( r2 )
andis. r10 ,r0 ,D B C R 0 _ I C @h
bnel- l o a d _ d b c r0
# endif
# ifdef C O N F I G _ P R E E M P T
b r e s t o r e
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel :
/* check current_thread_info->preempt_count */
[PATCH] powerpc: Merge thread_info.h
Merge ppc32 and ppc64 versions of thread_info.h. They were pretty
similar already, the chief changes are:
- Instead of inline asm to implement current_thread_info(),
which needs to be different for ppc32 and ppc64, we use C with an
asm("r1") register variable. gcc turns it into the same asm as we
used to have for both platforms.
- We replace ppc32's 'local_flags' with the ppc64
'syscall_noerror' field. The noerror flag was in fact the only thing
in the local_flags field anyway, so the ppc64 approach is simpler, and
means we only need a load-immediate/store instead of load/mask/store
when clearing the flag.
- In readiness for 64k pages, when THREAD_SIZE will be less
than a page, ppc64 used kmalloc() rather than get_free_pages() to
allocate the kernel stack. With this patch we do the same for ppc32,
since there's no strong reason not to.
- For ppc64, we no longer export THREAD_SHIFT and THREAD_SIZE
via asm-offsets, thread_info.h can now be safely included in asm, as
on ppc32.
Built and booted on G4 Powerbook (ARCH=ppc and ARCH=powerpc) and
Power5 (ARCH=ppc64 and ARCH=powerpc).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-10-21 09:45:50 +04:00
/* r9 = current_thread_info() (stack pointer masked to THREAD_SIZE boundary) */
rlwinm r9 ,r1 ,0 ,0 ,( 3 1 - T H R E A D _ S H I F T )
2005-10-10 16:36:14 +04:00
/* Kernel preemption: only preempt when preempt_count == 0, NEED_RESCHED is
 * set, and the interrupted context had interrupts enabled (MSR_EE in r3). */
lwz r0 ,T I _ P R E E M P T ( r9 )
cmpwi 0 ,r0 ,0 / * i f n o n - z e r o , j u s t r e s t o r e r e g s a n d r e t u r n * /
bne r e s t o r e
lwz r0 ,T I _ F L A G S ( r9 )
andi. r0 ,r0 ,_ T I F _ N E E D _ R E S C H E D
beq+ r e s t o r e
andi. r0 ,r3 ,M S R _ E E / * i n t e r r u p t s o f f ? * /
beq r e s t o r e / * d o n ' t s c h e d u l e i f s o * /
/* preempt_schedule_irq reschedules; loop back (1b) while NEED_RESCHED persists */
1 : bl p r e e m p t _ s c h e d u l e _ i r q
[PATCH] powerpc: Merge thread_info.h
Merge ppc32 and ppc64 versions of thread_info.h. They were pretty
similar already, the chief changes are:
- Instead of inline asm to implement current_thread_info(),
which needs to be different for ppc32 and ppc64, we use C with an
asm("r1") register variable. gcc turns it into the same asm as we
used to have for both platforms.
- We replace ppc32's 'local_flags' with the ppc64
'syscall_noerror' field. The noerror flag was in fact the only thing
in the local_flags field anyway, so the ppc64 approach is simpler, and
means we only need a load-immediate/store instead of load/mask/store
when clearing the flag.
- In readiness for 64k pages, when THREAD_SIZE will be less
than a page, ppc64 used kmalloc() rather than get_free_pages() to
allocate the kernel stack. With this patch we do the same for ppc32,
since there's no strong reason not to.
- For ppc64, we no longer export THREAD_SHIFT and THREAD_SIZE
via asm-offsets, thread_info.h can now be safely included in asm, as
on ppc32.
Built and booted on G4 Powerbook (ARCH=ppc and ARCH=powerpc) and
Power5 (ARCH=ppc64 and ARCH=powerpc).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-10-21 09:45:50 +04:00
/* Re-derive thread_info after the call (NOTE(review): interleaved date lines
 * and spurious spaces here are an extraction artifact). */
rlwinm r9 ,r1 ,0 ,0 ,( 3 1 - T H R E A D _ S H I F T )
2005-10-10 16:36:14 +04:00
/* Repeat the preemption loop while TIF_NEED_RESCHED remains set */
lwz r3 ,T I _ F L A G S ( r9 )
andi. r0 ,r3 ,_ T I F _ N E E D _ R E S C H E D
bne- 1 b
# else
/* Non-preemptible kernels: resume_kernel falls straight through to restore */
resume_kernel :
# endif / * C O N F I G _ P R E E M P T * /
/* interrupts are hard-disabled at this point */
/*
 * restore: common exception-exit path. Reloads GPRs and SPRs from the
 * exception frame at r1 and returns with rfi.
 * NOTE(review): instruction text below carries spurious spaces (extraction
 * artifact); semantics described from the visible structure.
 */
restore :
lwz r0 ,G P R 0 ( r1 )
lwz r2 ,G P R 2 ( r1 )
REST_ 4 G P R S ( 3 , r1 )
REST_ 2 G P R S ( 7 , r1 )
/* Restore XER and CTR from the frame */
lwz r10 ,_ X E R ( r1 )
lwz r11 ,_ C T R ( r1 )
mtspr S P R N _ X E R ,r10
mtctr r11
/* Dummy stwcx. to clear any outstanding lwarx reservation before returning */
PPC4 0 5 _ E R R 7 7 ( 0 ,r1 )
stwcx. r0 ,0 ,r1 / * t o c l e a r t h e r e s e r v a t i o n * /
# if ! ( d e f i n e d ( C O N F I G _ 4 x x ) | | d e f i n e d ( C O N F I G _ B O O K E ) )
/* Classic PPC: if MSR_RI was clear in the saved MSR, the exception hit an
 * unrecoverable spot -- call nonrecoverable (beql = conditional call) */
lwz r9 ,_ M S R ( r1 )
andi. r10 ,r9 ,M S R _ R I / * c h e c k i f t h i s e x c e p t i o n o c c u r r e d * /
beql n o n r e c o v e r a b l e / * a t a b a d p l a c e ( M S R : R I = 0 ) * /
lwz r10 ,_ C C R ( r1 )
lwz r11 ,_ L I N K ( r1 )
mtcrf 0 x F F ,r10
mtlr r11
/ *
* Once w e p u t v a l u e s i n S R R 0 a n d S R R 1 , w e a r e i n a s t a t e
* where e x c e p t i o n s a r e n o t r e c o v e r a b l e , s i n c e t a k i n g a n
* exception w i l l t r a s h S R R 0 a n d S R R 1 . T h e r e f o r e w e c l e a r t h e
* MSR : RI b i t t o i n d i c a t e t h i s . I f w e d o t a k e a n e x c e p t i o n ,
* we c a n ' t r e t u r n t o t h e p o i n t o f t h e e x c e p t i o n b u t w e
* can r e s t a r t t h e e x c e p t i o n e x i t p a t h a t t h e l a b e l
* exc_ e x i t _ r e s t a r t b e l o w . - - p a u l u s
* /
LOAD_ M S R _ K E R N E L ( r10 ,M S R _ K E R N E L & ~ M S R _ R I )
SYNC
MTMSRD( r10 ) / * c l e a r t h e R I b i t * /
/* From here to exc_exit_restart_end, a nested exception restarts at
 * exc_exit_restart instead of resuming in place (see nonrecoverable). */
.globl exc_exit_restart
exc_exit_restart :
lwz r9 ,_ M S R ( r1 )
lwz r12 ,_ N I P ( r1 )
FIX_ S R R 1 ( r9 ,r10 )
/* SRR0 = saved NIP, SRR1 = saved MSR for the rfi */
mtspr S P R N _ S R R 0 ,r12
mtspr S P R N _ S R R 1 ,r9
REST_ 4 G P R S ( 9 , r1 )
/* Final step: restore r1 itself from the frame, then return */
lwz r1 ,G P R 1 ( r1 )
.globl exc_exit_restart_end
exc_exit_restart_end :
SYNC
RFI
# else / * ! ( C O N F I G _ 4 x x | | C O N F I G _ B O O K E ) * /
/ *
* This i s a b i t d i f f e r e n t o n 4 x x / B o o k - E b e c a u s e i t d o e s n ' t h a v e
* the R I b i t i n t h e M S R .
* The T L B m i s s h a n d l e r c h e c k s i f w e h a v e i n t e r r u p t e d
* the e x c e p t i o n e x i t p a t h a n d r e s t a r t s i t i f s o
* ( well m a y b e o n e d a y i t w i l l . . . : ) .
* /
/* 4xx/Book-E variant: no MSR_RI, so no RI-clearing dance; just reload
 * LR/CR, then NIP/MSR into SRR0/SRR1 and rfi. */
lwz r11 ,_ L I N K ( r1 )
mtlr r11
lwz r10 ,_ C C R ( r1 )
mtcrf 0 x f f ,r10
REST_ 2 G P R S ( 9 , r1 )
.globl exc_exit_restart
exc_exit_restart :
lwz r11 ,_ N I P ( r1 )
lwz r12 ,_ M S R ( r1 )
exc_exit_start :
mtspr S P R N _ S R R 0 ,r11
mtspr S P R N _ S R R 1 ,r12
REST_ 2 G P R S ( 1 1 , r1 )
lwz r1 ,G P R 1 ( r1 )
.globl exc_exit_restart_end
exc_exit_restart_end :
PPC4 0 5 _ E R R 7 7 _ S Y N C
rfi
b . / * p r e v e n t p r e f e t c h p a s t r f i * /
/ *
* Returning f r o m a c r i t i c a l i n t e r r u p t i n u s e r m o d e d o e s n ' t n e e d
* to b e a n y d i f f e r e n t f r o m a n o r m a l e x c e p t i o n . F o r a c r i t i c a l
* interrupt i n t h e k e r n e l , w e j u s t r e t u r n ( w i t h o u t c h e c k i n g f o r
* preemption) s i n c e t h e i n t e r r u p t m a y h a v e h a p p e n e d a t s o m e c r u c i a l
* place ( e . g . i n s i d e t h e T L B m i s s h a n d l e r ) , a n d b e c a u s e w e w i l l b e
* running w i t h r1 p o i n t i n g i n t o c r i t i c a l _ s t a c k , n o t t h e c u r r e n t
* process' s k e r n e l s t a c k ( a n d t h e r e f o r e c u r r e n t _ t h r e a d _ i n f o ( ) w i l l
* give t h e w r o n g a n s w e r ) .
* We h a v e t o r e s t o r e v a r i o u s S P R s t h a t m a y h a v e b e e n i n u s e a t t h e
* time o f t h e c r i t i c a l i n t e r r u p t .
*
* /
/* On 40x: disable data relocation (MSR_DR) and convert r1 to a physical
 * address before the exception-level rfi; on other platforms this macro is
 * empty. (NOTE(review): spacing inside the lines is an extraction artifact.) */
# ifdef C O N F I G _ 4 0 x
# define P P C _ 4 0 x _ T U R N _ O F F _ M S R _ D R \
/ * avoid a n y p o s s i b l e T L B m i s s e s h e r e b y t u r n i n g o f f M S R . D R , w e \
* assume t h e i n s t r u c t i o n s h e r e a r e m a p p e d b y a p i n n e d T L B e n t r y * / \
li r10 ,M S R _ I R ; \
mtmsr r10 ; \
isync; \
tophys( r1 , r1 ) ;
# else
# define P P C _ 4 0 x _ T U R N _ O F F _ M S R _ D R
# endif
/*
 * RET_FROM_EXC_LEVEL(srr0, srr1, rfi-insn): common return sequence for
 * exception levels that use their own save/restore registers (critical,
 * debug, machine-check on 4xx/Book-E). Restores the full register state
 * from the frame at r1, re-loads DEAR/ESR, places saved NIP/MSR into the
 * level-specific SRR pair, and returns with the level-specific rfi variant.
 * User-mode returns branch off to user_exc_return instead.
 */
# define R E T _ F R O M _ E X C _ L E V E L ( e x c _ l v l _ s r r0 , e x c _ l v l _ s r r1 , e x c _ l v l _ r f i ) \
REST_ N V G P R S ( r1 ) ; \
lwz r3 ,_ M S R ( r1 ) ; \
andi. r3 ,r3 ,M S R _ P R ; \
LOAD_ M S R _ K E R N E L ( r10 ,M S R _ K E R N E L ) ; \
bne u s e r _ e x c _ r e t u r n ; \
lwz r0 ,G P R 0 ( r1 ) ; \
lwz r2 ,G P R 2 ( r1 ) ; \
REST_ 4 G P R S ( 3 , r1 ) ; \
REST_ 2 G P R S ( 7 , r1 ) ; \
lwz r10 ,_ X E R ( r1 ) ; \
lwz r11 ,_ C T R ( r1 ) ; \
mtspr S P R N _ X E R ,r10 ; \
mtctr r11 ; \
PPC4 0 5 _ E R R 7 7 ( 0 ,r1 ) ; \
stwcx. r0 ,0 ,r1 ; /* to clear the reservation */ \
lwz r11 ,_ L I N K ( r1 ) ; \
mtlr r11 ; \
lwz r10 ,_ C C R ( r1 ) ; \
mtcrf 0 x f f ,r10 ; \
PPC_ 4 0 x _ T U R N _ O F F _ M S R _ D R ; \
lwz r9 ,_ D E A R ( r1 ) ; \
lwz r10 ,_ E S R ( r1 ) ; \
mtspr S P R N _ D E A R ,r9 ; \
mtspr S P R N _ E S R ,r10 ; \
lwz r11 ,_ N I P ( r1 ) ; \
lwz r12 ,_ M S R ( r1 ) ; \
mtspr e x c _ l v l _ s r r0 ,r11 ; \
mtspr e x c _ l v l _ s r r1 ,r12 ; \
lwz r9 ,G P R 9 ( r1 ) ; \
lwz r12 ,G P R 1 2 ( r1 ) ; \
lwz r10 ,G P R 1 0 ( r1 ) ; \
lwz r11 ,G P R 1 1 ( r1 ) ; \
lwz r1 ,G P R 1 ( r1 ) ; \
PPC4 0 5 _ E R R 7 7 _ S Y N C ; \
exc_ l v l _ r f i ; \
b . ; /* prevent prefetch past exc_lvl_rfi */
/* Critical-interrupt return: uses CSRR0/CSRR1 and the rfci instruction */
.globl ret_from_crit_exc
ret_from_crit_exc :
RET_ F R O M _ E X C _ L E V E L ( S P R N _ C S R R 0 , S P R N _ C S R R 1 , R F C I )
# ifdef C O N F I G _ B O O K E
/* Debug-interrupt return (Book-E only): DSRR0/DSRR1 + rfdi */
.globl ret_from_debug_exc
ret_from_debug_exc :
RET_ F R O M _ E X C _ L E V E L ( S P R N _ D S R R 0 , S P R N _ D S R R 1 , R F D I )
/* Machine-check return (Book-E only): MCSRR0/MCSRR1 + rfmci */
.globl ret_from_mcheck_exc
ret_from_mcheck_exc :
RET_ F R O M _ E X C _ L E V E L ( S P R N _ M C S R R 0 , S P R N _ M C S R R 1 , R F M C I )
# endif / * C O N F I G _ B O O K E * /
/ *
* Load t h e D B C R 0 v a l u e f o r a t a s k t h a t i s b e i n g p t r a c e d ,
* having f i r s t s a v e d a w a y t h e g l o b a l D B C R 0 . N o t e t h a t r0
* has t h e d b c r0 v a l u e t o s e t u p o n e n t r y t o t h i s .
* /
load_dbcr0 :
/* Disable debug exceptions (clear MSR_DE) while we switch DBCR0 */
mfmsr r10 / * f i r s t d i s a b l e d e b u g e x c e p t i o n s * /
rlwinm r10 ,r10 ,0 ,~ M S R _ D E
mtmsr r10
isync
/* Save the current (global) DBCR0 into global_dbcr0[0] */
mfspr r10 ,S P R N _ D B C R 0
lis r11 ,g l o b a l _ d b c r0 @ha
addi r11 ,r11 ,g l o b a l _ d b c r0 @l
stw r10 ,0 ( r11 )
/* Install the task's DBCR0 (passed in r0) */
mtspr S P R N _ D B C R 0 ,r0
/* Bump the use count kept in global_dbcr0[1] */
lwz r10 ,4 ( r11 )
addi r10 ,r10 ,1
stw r10 ,4 ( r11 )
/* Writing all-ones to DBSR clears every pending debug event */
li r11 ,- 1
mtspr S P R N _ D B S R ,r11 / * c l e a r a l l p e n d i n g d e b u g e v e n t s * /
blr
/* 8 bytes: [0] = saved global DBCR0, [4] = use count */
.comm global_ d b c r0 ,8
# endif / * ! ( C O N F I G _ 4 x x | | C O N F I G _ B O O K E ) * /
/* Work pending on return to user: r9 = thread_info flags (set by caller) */
do_work : /* r10 contains MSR_KERNEL here */
andi. r0 ,r9 ,_ T I F _ N E E D _ R E S C H E D
beq d o _ u s e r _ s i g n a l
do_resched : /* r10 contains MSR_KERNEL here */
/* Re-enable interrupts (set MSR_EE) before calling schedule() */
ori r10 ,r10 ,M S R _ E E
SYNC
MTMSRD( r10 ) / * h a r d - e n a b l e i n t e r r u p t s * /
bl s c h e d u l e
recheck :
/* Disable interrupts again and re-test the work flags below */
LOAD_ M S R _ K E R N E L ( r10 ,M S R _ K E R N E L )
SYNC
MTMSRD( r10 ) / * d i s a b l e i n t e r r u p t s * /
[PATCH] powerpc: Merge thread_info.h
Merge ppc32 and ppc64 versions of thread_info.h. They were pretty
similar already, the chief changes are:
- Instead of inline asm to implement current_thread_info(),
which needs to be different for ppc32 and ppc64, we use C with an
asm("r1") register variable. gcc turns it into the same asm as we
used to have for both platforms.
- We replace ppc32's 'local_flags' with the ppc64
'syscall_noerror' field. The noerror flag was in fact the only thing
in the local_flags field anyway, so the ppc64 approach is simpler, and
means we only need a load-immediate/store instead of load/mask/store
when clearing the flag.
- In readiness for 64k pages, when THREAD_SIZE will be less
than a page, ppc64 used kmalloc() rather than get_free_pages() to
allocate the kernel stack. With this patch we do the same for ppc32,
since there's no strong reason not to.
- For ppc64, we no longer export THREAD_SHIFT and THREAD_SIZE
via asm-offsets, thread_info.h can now be safely included in asm, as
on ppc32.
Built and booted on G4 Powerbook (ARCH=ppc and ARCH=powerpc) and
Power5 (ARCH=ppc64 and ARCH=powerpc).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-10-21 09:45:50 +04:00
/* r9 = current_thread_info() again after schedule() may have switched stacks.
 * (NOTE(review): interleaved date line and spurious spaces are an extraction
 * artifact.) */
rlwinm r9 ,r1 ,0 ,0 ,( 3 1 - T H R E A D _ S H I F T )
2005-10-10 16:36:14 +04:00
/* Loop back to do_resched while NEED_RESCHED; fall through to signal
 * delivery if SIGPENDING, else straight back to restore_user. */
lwz r9 ,T I _ F L A G S ( r9 )
andi. r0 ,r9 ,_ T I F _ N E E D _ R E S C H E D
bne- d o _ r e s c h e d
andi. r0 ,r9 ,_ T I F _ S I G P E N D I N G
beq r e s t o r e _ u s e r
do_user_signal : /* r10 contains MSR_KERNEL here */
/* Enable interrupts for signal delivery */
ori r10 ,r10 ,M S R _ E E
SYNC
MTMSRD( r10 ) / * h a r d - e n a b l e i n t e r r u p t s * /
/* save r13-r31 in the exception frame, if not already done */
lwz r3 ,T R A P ( r1 )
andi. r0 ,r3 ,1
beq 2 f
SAVE_ N V G P R S ( r1 )
/* Clear the low bit of TRAP to record that NVGPRs are now saved */
rlwinm r3 ,r3 ,0 ,0 ,3 0
stw r3 ,T R A P ( r1 )
/* do_signal(oldset = NULL, regs = r1 + STACK_FRAME_OVERHEAD) */
2 : li r3 ,0
addi r4 ,r1 ,S T A C K _ F R A M E _ O V E R H E A D
bl d o _ s i g n a l
REST_ N V G P R S ( r1 )
b r e c h e c k
/ *
* We c o m e h e r e w h e n w e a r e a t t h e e n d o f h a n d l i n g a n e x c e p t i o n
* that o c c u r r e d a t a p l a c e w h e r e t a k i n g a n e x c e p t i o n w i l l l o s e
* state i n f o r m a t i o n , s u c h a s t h e c o n t e n t s o f S R R 0 a n d S R R 1 .
* /
nonrecoverable :
/* If the interrupted PC (r12) lies inside [exc_exit_restart,
 * exc_exit_restart_end), we interrupted the exception-exit sequence:
 * restart it from exc_exit_restart and count the event in ee_restarts. */
lis r10 ,e x c _ e x i t _ r e s t a r t _ e n d @ha
addi r10 ,r10 ,e x c _ e x i t _ r e s t a r t _ e n d @l
cmplw r12 ,r10
bge 3 f
lis r11 ,e x c _ e x i t _ r e s t a r t @ha
addi r11 ,r11 ,e x c _ e x i t _ r e s t a r t @l
cmplw r12 ,r11
blt 3 f
lis r10 ,e e _ r e s t a r t s @ha
lwz r12 ,e e _ r e s t a r t s @l(r10)
addi r12 ,r12 ,1
stw r12 ,e e _ r e s t a r t s @l(r10)
mr r12 ,r11 / * r e s t a r t a t e x c _ e x i t _ r e s t a r t * /
blr
3 : /* OK, we can't recover, kill this process */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_ F T R _ S E C T I O N
blr
END_ F T R _ S E C T I O N _ I F S E T ( C P U _ F T R _ 6 0 1 )
/* Make sure NVGPRs are in the frame (clear TRAP low bit once saved),
 * then report via nonrecoverable_exception(regs) */
lwz r3 ,T R A P ( r1 )
andi. r0 ,r3 ,1
beq 4 f
SAVE_ N V G P R S ( r1 )
rlwinm r3 ,r3 ,0 ,0 ,3 0
stw r3 ,T R A P ( r1 )
4 : addi r3 ,r1 ,S T A C K _ F R A M E _ O V E R H E A D
bl n o n r e c o v e r a b l e _ e x c e p t i o n
/* shouldn't return */
b 4 b
/* 4-byte counter of exception-exit restarts performed above */
.comm ee_ r e s t a r t s ,4
/ *
* PROM c o d e f o r s p e c i f i c m a c h i n e s f o l l o w s . P u t i t
* here s o i t ' s e a s y t o a d d a r c h - s p e c i f i c s e c t i o n s l a t e r .
* - - Cort
* /
# ifdef C O N F I G _ P P C _ O F
/ *
* On C H R P , t h e R u n - T i m e A b s t r a c t i o n S e r v i c e s ( R T A S ) h a v e t o b e
* called w i t h t h e M M U o f f .
* /
/*
 * enter_rtas: call into RTAS firmware with IR/DR (MMU translation) off.
 * Builds a stack frame, stashes LR and the caller's MSR in it, computes
 * physical addresses for the return point and the frame, then rfi's into
 * rtas_entry with SRR1 = MSR_KERNEL & ~(MSR_IR|MSR_DR).
 */
_ GLOBAL( e n t e r _ r t a s )
stwu r1 ,- I N T _ F R A M E _ S I Z E ( r1 )
mflr r0
stw r0 ,I N T _ F R A M E _ S I Z E + 4 ( r1 )
lis r4 ,r t a s _ d a t a @ha
lwz r4 ,r t a s _ d a t a @l(r4)
lis r6 ,1 f @ha /* physical return address for rtas */
addi r6 ,r6 ,1 f @l
tophys( r6 ,r6 )
tophys( r7 ,r1 )
lis r8 ,r t a s _ e n t r y @ha
lwz r8 ,r t a s _ e n t r y @l(r8)
/* Save the caller's MSR in the frame so it can be restored on return */
mfmsr r9
stw r9 ,8 ( r1 )
LOAD_ M S R _ K E R N E L ( r0 ,M S R _ K E R N E L )
SYNC / * d i s a b l e i n t e r r u p t s s o S R R 0 / 1 * /
MTMSRD( r0 ) / * d o n ' t g e t t r a s h e d * /
li r9 ,M S R _ K E R N E L & ~ ( M S R _ I R | M S R _ D R )
mtlr r6
/* Keep the physical frame pointer in SPRG2 while translation is off */
CLR_ T O P 3 2 ( r7 )
mtspr S P R N _ S P R G 2 ,r7
mtspr S P R N _ S R R 0 ,r8
mtspr S P R N _ S R R 1 ,r9
RFI
/* RTAS returns here (physical address); recover LR and original MSR */
1 : tophys( r9 ,r1 )
lwz r8 ,I N T _ F R A M E _ S I Z E + 4 ( r9 ) / * g e t r e t u r n a d d r e s s * /
lwz r9 ,8 ( r9 ) / * o r i g i n a l m s r v a l u e * /
FIX_ S R R 1 ( r9 ,r0 )
addi r1 ,r1 ,I N T _ F R A M E _ S I Z E
li r0 ,0
mtspr S P R N _ S P R G 2 ,r0
mtspr S P R N _ S R R 0 ,r8
mtspr S P R N _ S R R 1 ,r9
RFI / * r e t u r n t o c a l l e r * /
/* Machine check taken while inside RTAS: trap unconditionally (twi 31,0,0);
 * we cannot handle it safely here since translation may be off */
.globl machine_check_in_rtas
machine_check_in_rtas :
twi 3 1 ,0 ,0
/* XXX load up BATs and panic */
# endif / * C O N F I G _ P P C _ O F * /