/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 * fit into one page in order to not encounter a TLB miss between the
 * modification of srr0/srr1 and the associated rfi.
 */
	.align	12

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	tovirt_vmstack r12, r12
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r2, r12, -THREAD
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	kuap_save_and_lock r11, r12, r9, r2, r6
	addi	r2, r12, -THREAD
#ifndef CONFIG_VMAP_STACK
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
#endif
5:
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
	.globl	transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	tovirt_novmstack r2, r2		/* set r2 to current */
	tovirt_vmstack r9, r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * When tracing IRQ state (lockdep) we enable the MMU before we call
	 * the IRQ tracing functions as they might access vmalloc space or
	 * perform IOs for console output.
	 *
	 * To speed up the syscall path where interrupts stay on, let's check
	 * first if we are changing the MSR value at all.
	 */
	tophys_novmstack r12, r1
	lwz	r12,_MSR(r12)
	andi.	r12,r12,MSR_EE
	bne	1f

	/* MSR isn't changing, just transition directly */
#endif
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	rfi				/* jump to handler, enable MMU */
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	kuap_restore r11, r2, r3, r4, r5
	lwz	r2, GPR2(r11)
	b	fast_exception_return
#endif
_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)

#ifdef CONFIG_TRACE_IRQFLAGS
1:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
	 * keep interrupts disabled at this point otherwise we might risk
	 * taking an interrupt before we tell lockdep they are enabled.
	 */
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r0
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

reenable_mmu:
	/*
	 * We save a bunch of GPRs,
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively,
	 * r0, r4-r8, r12, CCR, CTR, XER etc... are left
	 * clobbered as they aren't useful past this point.
	 */

	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)

	/* If we are disabling interrupts (normal case), simply log it with
	 * lockdep
	 */
1:	bl	trace_hardirqs_off
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */
#ifndef CONFIG_VMAP_STACK
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(stack_ovf)
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
trace_syscall_entry_irq_off:
	/*
	 * Syscall shouldn't happen while interrupts are disabled,
	 * so let's do a warning here.
	 */
0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
	bl	trace_hardirqs_on

	/* Now enable for real */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
	mtmsr	r10

	REST_GPR(0, r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)
	b	DoSyscall
#endif /* CONFIG_TRACE_IRQFLAGS */

	.globl	transfer_to_syscall
transfer_to_syscall:
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	andi.	r12,r9,MSR_EE
	beq-	trace_syscall_entry_irq_off
#endif /* CONFIG_TRACE_IRQFLAGS */

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
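	/* On 32-bit powerpc the syscall number arrives in r0 and the
	 * arguments in r3-r8; ORIG_GPR3 keeps the original first argument
	 * so it can be restored if the syscall is restarted, and RESULT
	 * is cleared before the handler runs.
	 */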
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Make sure interrupts are enabled */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	/* If we came in with interrupts disabled, WARN and mark them
	 * enabled for lockdep now */
0:	tweqi	r12, 0
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#endif /* CONFIG_TRACE_IRQFLAGS */
	lwz	r11,TI_FLAGS(r2)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and branch to .66f above has
	 * committed.
	 */

	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	stw	r3,GPR3(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	rseq_syscall
	lwz	r3,GPR3(r1)
#endif
	mr	r6,r3
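	/* keep a copy of the raw return value in r6; syscall_exit_work
	 * stores it in RESULT(r1).
	 */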
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	mtmsr	r10
	lwz	r9,TI_FLAGS(r2)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
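	/* Return values in [-MAX_ERRNO, -1] are errors: hand userspace the
	 * positive errno and flag the failure by setting the SO bit in CR0.
	 */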
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't normally happen.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
#endif /* CONFIG_PPC_47x */
1:
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock r5, r7
#endif
	kuap_check r2, r4
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
syscall_exit_finish:
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
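	/* r14 holds the thread function and r15 its argument, as set up
	 * by copy_thread() for kernel threads.
	 */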
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r2,TI_FLAGS
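	/* Clear the flags with a lwarx/stwcx. loop so the update is atomic
	 * and doesn't race with concurrent changes to TI_FLAGS.
	 */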
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
	stwcx.	r8,0,r12
	bne-	3b
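
/*
 * The lwarx/stwcx. loop above clears the per-syscall TIF bits without
 * losing concurrent updates to the flags word. A rough userspace C
 * sketch of the same idea using a compare-and-swap loop (the bit values
 * and the clear_flags() helper are illustrative, not the kernel's API):
 *
 *	#include <stdatomic.h>
 *
 *	#define TIF_NOERROR	(1u << 1)	// example bit positions only
 *	#define TIF_RESTOREALL	(1u << 2)
 *
 *	static void clear_flags(_Atomic unsigned int *flags, unsigned int mask)
 *	{
 *		unsigned int old = atomic_load(flags);
 *		// retry until no other CPU changed the word in between,
 *		// mirroring the stwcx. failure -> branch back to lwarx
 *		while (!atomic_compare_exchange_weak(flags, &old, old & ~mask))
 *			;
 *	}
 */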
4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	mtmsr	r10
/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

/*
 * System call was called from kernel. We get here with SRR1 in r9.
 * Mark the exception as recoverable once we have retrieved SRR0,
 * trap a warning and return ENOSYS with CR[SO] set.
 */
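
/*
 * On 32-bit powerpc a failing syscall is reported to userspace by
 * setting the summary-overflow bit (CR0.SO) and returning the positive
 * errno value in r3, which is what the crset/li sequence below does.
 * A rough C sketch of how a caller decodes that convention (the struct
 * and helper are illustrative, not a real libc interface):
 *
 *	struct sc_ret { unsigned long r3; int so; };
 *
 *	static long decode_syscall_return(struct sc_ret r)
 *	{
 *		if (r.so)
 *			return -(long)r.r3;	// e.g. r3 == ENOSYS -> -ENOSYS
 *		return (long)r.r3;		// success: r3 is the result
 *	}
 */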
	.globl	ret_from_kernel_syscall
ret_from_kernel_syscall:
	mfspr	r9, SPRN_SRR0
	mfspr	r10, SPRN_SRR1
#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtmsr	r11
#endif

0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING

	li	r3, ENOSYS
	crset	so
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0, r9
	mtspr	SPRN_SRR1, r10
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)

/*
 * The fork/clone functions need to copy the full register set into
 * the child process.  Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
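
/*
 * The wrappers below record that the non-volatile GPRs are now present
 * in the exception frame by clearing the low bit of the saved trap
 * word. A small C sketch of that bookkeeping convention (the structure
 * and helper are illustrative, not the kernel's definitions):
 *
 *	struct frame { unsigned long trap; unsigned long nvgprs[19]; };
 *
 *	static void save_nvgprs_once(struct frame *f, const unsigned long *regs)
 *	{
 *		if (!(f->trap & 1))		// bit already clear: full set saved
 *			return;
 *		for (int i = 0; i < 19; i++)	// copy r13..r31
 *			f->nvgprs[i] = regs[i];
 *		f->trap &= ~1UL;		// mark "full register set saved"
 *	}
 */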
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_clone3
ppc_clone3:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone3

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
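
/*
 * A rough C sketch of the dispatch done below: call the C fault
 * handler, and only when it reports a bad kernel fault do the extra
 * work of saving the non-volatile GPRs and taking the "bad fault"
 * path (handle_fault()/report_bad_fault()/save_nvgprs() are
 * illustrative stand-ins, not the real kernel interfaces):
 *
 *	struct regs;				// opaque here
 *	extern int handle_fault(struct regs *regs);
 *	extern void save_nvgprs(struct regs *regs);
 *	extern void report_bad_fault(struct regs *regs, int err);
 *
 *	void page_fault_entry(struct regs *regs)
 *	{
 *		int err = handle_fault(regs);	// 0 means the fault was handled
 *		if (err == 0)
 *			return;			// fast path: ret_from_except
 *		save_nvgprs(regs);		// only the slow path needs them
 *		report_bad_fault(regs, err);	// bad kernel page fault
 *	}
 */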
	.globl	handle_page_fault
handle_page_fault:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r4,r3		/* err arg for bad_page_fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	__bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
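
/*
 * A userspace analogy of the save-state / load-state idea below, using
 * the POSIX ucontext API. This is only an illustration of switching
 * between two stacks and resuming where the other side left off; it is
 * not how the kernel implements _switch:
 *
 *	#include <stdio.h>
 *	#include <ucontext.h>
 *
 *	static ucontext_t main_ctx, task_ctx;
 *	static char task_stack[64 * 1024];
 *
 *	static void task_body(void)
 *	{
 *		puts("running on the other stack");
 *		swapcontext(&task_ctx, &main_ctx);	// "switch back"
 *	}
 *
 *	int main(void)
 *	{
 *		getcontext(&task_ctx);
 *		task_ctx.uc_stack.ss_sp = task_stack;
 *		task_ctx.uc_stack.ss_size = sizeof(task_stack);
 *		task_ctx.uc_link = &main_ctx;
 *		makecontext(&task_ctx, task_body, 0);
 *		swapcontext(&main_ctx, &task_ctx);	// save here, resume task
 *		puts("back on the original stack");
 *		return 0;
 *	}
 */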
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	mtmsr	r11
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check r2, r0
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
powerpc/8xx: Perf events on PPC 8xx
This patch has been reworked since RFC version. In the RFC, this patch
was preceded by a patch clearing MSR RI for all PPC32 at all time at
exception prologs. Now MSR RI clearing is done only when this 8xx perf
events functionality is compiled in, it is therefore limited to 8xx
and merged inside this patch.
Other main changes have been to take into account detailed review from
Peter Zijlstra. The instructions counter has been reworked to behave
as a free running counter like the three other counters.
The 8xx has no PMU, however some events can be emulated by other means.
This patch implements the following events (as reported by 'perf list'):
cpu-cycles OR cycles [Hardware event]
instructions [Hardware event]
dTLB-load-misses [Hardware cache event]
iTLB-load-misses [Hardware cache event]
'cycles' event is implemented using the timebase clock. Timebase clock
corresponds to the CPU clock divided by 16, so the number of cycles is
approximately 16 times the number of TB ticks.
On the 8xx, TLB misses are handled by software. It is therefore
easy to count all TLB misses each time the TLB miss exception is
called.
'instructions' is calculated by using instruction watchpoint counter.
This patch sets counter A to count instructions at address greater
than 0, hence we count all instructions executed while MSR RI bit is
set. The counter is set to the maximum which is 0xffff. Every 65535
instructions, debug instruction breakpoint exception fires. The
exception handler increments a counter in memory which then
represents the upper part of the instruction counter. We therefore
end up with a 48-bit counter. In order to avoid unnecessary overhead
while no perf event is active, this counter is started when the first
event referring to this counter is added, and the counter is stopped
when the last event referring to it is deleted. In order to properly
support breakpoint exceptions, the MSR RI bit has to be unset in exception
epilogs, in order to avoid breakpoint exceptions during the critical
sections where changes to SRR0 and SRR1 would be problematic.
All counters are handled as free running counters.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
2016-12-15 15:42:18 +03:00
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	unrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	mtmsr	r10		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel
user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock	r10, r11
#endif

	b	restore
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	lwz	r8,TI_FLAGS(r2)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r2,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
	stwcx.	r8,0,r5
	bne-	0b
1:
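
/*
 * A rough C sketch of the trampoline copy above: the exception frame is
 * duplicated just below the stack location the probed 'stwu r1,...(r1)'
 * was about to create, and then the emulated store itself is completed
 * (types and names here are illustrative only):
 *
 *	typedef unsigned int u32;
 *
 *	static void emulate_stack_store(u32 *src_frame, unsigned long new_sp,
 *					unsigned long old_sp,
 *					unsigned long frame_size)
 *	{
 *		u32 *dst_frame = (u32 *)(new_sp - frame_size);
 *		for (unsigned long i = 0; i < frame_size / 4; i++)
 *			dst_frame[i] = src_frame[i];	// copy the exception frame
 *		*(unsigned long *)new_sp = old_sp;	// complete the emulated stwu
 *	}
 */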
#ifdef CONFIG_PREEMPTION
	/* check current_thread_info->preempt_count  */
	lwz	r0,TI_PREEMPT(r2)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore_kuap
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore_kuap
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore_kuap	/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
	bl	preempt_schedule_irq
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPTION */
restore_kuap:
	kuap_restore r1, r2, r9, r10, r0

	/* interrupts are hard-disabled at this point */
restore:
#if defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	bl	trace_hardirqs_on
	addi	r1, r1, 32
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
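
	/*
	 * A rough C sketch of the recovery policy used by the
	 * nonrecoverable path further down: once SRR0/SRR1 are loaded we
	 * can only recover if the interrupted NIP falls inside the
	 * [exc_exit_restart, exc_exit_restart_end) window, in which case
	 * the whole window is simply restarted (names are illustrative):
	 *
	 *	extern char exc_exit_restart[], exc_exit_restart_end[];
	 *
	 *	static unsigned long fixup_nip(unsigned long nip, int *recovered)
	 *	{
	 *		if (nip >= (unsigned long)exc_exit_restart &&
	 *		    nip <  (unsigned long)exc_exit_restart_end) {
	 *			*recovered = 1;
	 *			return (unsigned long)exc_exit_restart;	// restart window
	 *		}
	 *		*recovered = 0;		// no safe restart point: give up
	 *		return nip;
	 *	}
	 */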
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
	mtmsr	r10		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	rfi
_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)

#else  /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	rfi
	b	.			/* prevent prefetch past rfi */
_ASM_NOKPROBE_SYMBOL(exc_exit_restart)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						\
	/* avoid any possible TLB misses here by turning off MSR.DR, we \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						\
	mtmsr	r10;							\
	isync;								\
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
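
/*
 * A rough C model of the bookkeeping done below: global_dbcr0 is laid
 * out as two 32-bit words per CPU (hence ".space 8*NR_CPUS" further
 * down), holding the saved global DBCR0 and a use count. The names and
 * the dbcr0_read()/dbcr0_write() accessors are assumptions for the
 * sake of illustration, not kernel interfaces:
 *
 *	enum { NR_CPUS = 4 };	// placeholder for the kernel's NR_CPUS
 *
 *	struct dbcr0_save { unsigned int saved_dbcr0; unsigned int count; };
 *	static struct dbcr0_save global_dbcr0[NR_CPUS];
 *
 *	static void load_task_dbcr0(unsigned int task_dbcr0, int cpu)
 *	{
 *		global_dbcr0[cpu].saved_dbcr0 = dbcr0_read();	// stash global value
 *		dbcr0_write(task_dbcr0);			// install ptraced value
 *		global_dbcr0[cpu].count++;			// remember it is in use
 *	}
 */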
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
	.global global_dbcr0
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsr	r10		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	mtmsr	r10		/* disable interrupts */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
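
/*
 * A rough C sketch of the do_work/recheck loop above and below: work
 * flags are only acted on with interrupts enabled, but the final
 * "no more work" decision is always re-made with interrupts hard
 * disabled, so no flag can be set behind our back (helper names are
 * illustrative, not kernel interfaces):
 *
 *	void do_user_work(void)
 *	{
 *		unsigned long flags = read_thread_flags();
 *
 *		while (flags & _TIF_USER_WORK_MASK) {
 *			local_irq_enable();
 *			if (flags & _TIF_NEED_RESCHED)
 *				schedule();
 *			else
 *				do_signal_work();	// do_notify_resume() path
 *			local_irq_disable();		// re-check with IRQs off
 *			flags = read_thread_flags();
 *		}
 *		// return to user with interrupts still hard-disabled
 *	}
 */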

do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	mtmsr	r10		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	5f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
5:	mfspr	r2,SPRN_SPRG_THREAD
	addi	r2,r2,-THREAD
	tovirt(r2,r2)			/* set back r2 to current */
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	/* shouldn't return */
	b	4b
_ASM_NOKPROBE_SYMBOL(nonrecoverable)

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
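
/*
 * enter_rtas below builds the MSR image that RTAS will run with:
 * external interrupts are already off in MSR_KERNEL, and instruction
 * and data address translation are cleared before the rfi. A small C
 * sketch of that bit manipulation, using illustrative MSR bit values
 * rather than the real <asm/reg.h> definitions:
 *
 *	#define MSR_IR	0x00000020u	// instruction relocation (example value)
 *	#define MSR_DR	0x00000010u	// data relocation (example value)
 *
 *	static unsigned int rtas_entry_msr(unsigned int kernel_msr)
 *	{
 *		// kernel_msr already has external interrupts disabled;
 *		// additionally turn address translation off for RTAS
 *		return kernel_msr & ~(MSR_IR | MSR_DR);
 *	}
 */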
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys_novmstack r7, r1
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	mtmsr	r0	/* disable interrupts so SRR0/1 don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	stw	r7, THREAD + RTAS_SP(r2)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi
1:	tophys_novmstack r9, r1
#ifdef CONFIG_VMAP_STACK
	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
	mtmsr	r0
	isync
#endif
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	tophys_novmstack r7, r2
	stw	r0, THREAD + RTAS_SP(r7)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi			/* return to caller */
_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */