/*
 * Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
 * Extracted from signal_32.c and signal_64.c
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */

#ifndef _POWERPC_ARCH_SIGNAL_H
#define _POWERPC_ARCH_SIGNAL_H

void __user *get_sigframe(struct ksignal *ksig, struct task_struct *tsk,
			  size_t frame_size, int is_32);

extern int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
			   struct task_struct *tsk);

extern int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
			      struct task_struct *tsk);

static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src)
{
	BUILD_BUG_ON(sizeof(sigset_t) != sizeof(u64));

	return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]);
}
#define unsafe_get_user_sigset(dst, src, label) \
	unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label)
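
/*
 * sigset_t is a single 64-bit word on powerpc, so copying sig[0] with one
 * (unsafe_)get_user is enough; the BUILD_BUG_ON() above guards that
 * assumption.
 */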

#ifdef CONFIG_VSX
extern unsigned long copy_vsx_to_user(void __user *to,
				      struct task_struct *task);
extern unsigned long copy_ckvsx_to_user(void __user *to,
					struct task_struct *task);
extern unsigned long copy_vsx_from_user(struct task_struct *task,
					void __user *from);
extern unsigned long copy_ckvsx_from_user(struct task_struct *task,
					  void __user *from);
unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task);
unsigned long copy_ckfpr_to_user(void __user *to, struct task_struct *task);
unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from);
unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from);
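
/*
 * The unsafe_* copy helpers below assume the caller has already opened a
 * user access window with user_read_access_begin()/user_write_access_begin()
 * and branches to 'label' on fault.  A minimal usage sketch (the buffer and
 * error handling are illustrative, not taken from this file):
 *
 *	if (!user_write_access_begin(buf, ELF_NFPREG * sizeof(double)))
 *		return 1;
 *	unsafe_copy_fpr_to_user(buf, tsk, failed);
 *	user_write_access_end();
 *	return 0;
 * failed:
 *	user_write_access_end();
 *	return 1;
 */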
#define unsafe_copy_fpr_to_user(to, task, label)	do {		\
	struct task_struct *__t = task;					\
	u64 __user *buf = (u64 __user *)to;				\
	int i;								\
									\
	for (i = 0; i < ELF_NFPREG - 1; i++)				\
		unsafe_put_user(__t->thread.TS_FPR(i), &buf[i], label);	\
	unsafe_put_user(__t->thread.fp_state.fpscr, &buf[i], label);	\
} while (0)

#define unsafe_copy_vsx_to_user(to, task, label)	do {		\
	struct task_struct *__t = task;					\
	u64 __user *buf = (u64 __user *)to;				\
	int i;								\
									\
	for (i = 0; i < ELF_NVSRHALFREG; i++)				\
		unsafe_put_user(__t->thread.fp_state.fpr[i][TS_VSRLOWOFFSET], \
				&buf[i], label);			\
} while (0)

#define unsafe_copy_fpr_from_user(task, from, label)	do {		\
	struct task_struct *__t = task;					\
	u64 __user *buf = (u64 __user *)from;				\
	int i;								\
									\
	for (i = 0; i < ELF_NFPREG - 1; i++)				\
		unsafe_get_user(__t->thread.TS_FPR(i), &buf[i], label);	\
	unsafe_get_user(__t->thread.fp_state.fpscr, &buf[i], label);	\
} while (0)

#define unsafe_copy_vsx_from_user(task, from, label)	do {		\
	struct task_struct *__t = task;					\
	u64 __user *buf = (u64 __user *)from;				\
	int i;								\
									\
	for (i = 0; i < ELF_NVSRHALFREG; i++)				\
		unsafe_get_user(__t->thread.fp_state.fpr[i][TS_VSRLOWOFFSET], \
				&buf[i], label);			\
} while (0)
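
/*
 * The "ck" variants operate on the checkpointed register state saved
 * while a hardware transaction is active.
 */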
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define unsafe_copy_ckfpr_to_user(to, task, label)	do {		\
	struct task_struct *__t = task;					\
	u64 __user *buf = (u64 __user *)to;				\
	int i;								\
									\
	for (i = 0; i < ELF_NFPREG - 1; i++)				\
		unsafe_put_user(__t->thread.TS_CKFPR(i), &buf[i], label); \
	unsafe_put_user(__t->thread.ckfp_state.fpscr, &buf[i], label);	\
} while (0)

#define unsafe_copy_ckvsx_to_user(to, task, label)	do {		\
	struct task_struct *__t = task;					\
	u64 __user *buf = (u64 __user *)to;				\
	int i;								\
									\
	for (i = 0; i < ELF_NVSRHALFREG; i++)				\
		unsafe_put_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \
				&buf[i], label);			\
} while (0)

#define unsafe_copy_ckfpr_from_user(task, from, label)	do {		\
	struct task_struct *__t = task;					\
	u64 __user *buf = (u64 __user *)from;				\
	int i;								\
									\
	for (i = 0; i < ELF_NFPREG - 1; i++)				\
		unsafe_get_user(__t->thread.TS_CKFPR(i), &buf[i], label); \
	unsafe_get_user(__t->thread.ckfp_state.fpscr, &buf[i], label);	\
} while (0)

#define unsafe_copy_ckvsx_from_user(task, from, label)	do {		\
	struct task_struct *__t = task;					\
	u64 __user *buf = (u64 __user *)from;				\
	int i;								\
									\
	for (i = 0; i < ELF_NVSRHALFREG; i++)				\
		unsafe_get_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \
				&buf[i], label);			\
} while (0)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

#elif defined(CONFIG_PPC_FPU_REGS)

#define unsafe_copy_fpr_to_user(to, task, label)		\
	unsafe_copy_to_user(to, (task)->thread.fp_state.fpr,	\
			    ELF_NFPREG * sizeof(double), label)

#define unsafe_copy_fpr_from_user(task, from, label)		\
	unsafe_copy_from_user((task)->thread.fp_state.fpr, from,	\
			      ELF_NFPREG * sizeof(double), label)

static inline unsigned long
copy_fpr_to_user(void __user *to, struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

static inline unsigned long
copy_fpr_from_user(struct task_struct *task, void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define unsafe_copy_ckfpr_to_user(to, task, label)		\
	unsafe_copy_to_user(to, (task)->thread.ckfp_state.fpr,	\
			    ELF_NFPREG * sizeof(double), label)

static inline unsigned long
copy_ckfpr_to_user(void __user *to, struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

static inline unsigned long
copy_ckfpr_from_user(struct task_struct *task, void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

#else
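
/*
 * Neither classic FP nor VSX: the helpers become no-ops.  The
 * "if (0) goto label" keeps 'label' referenced so callers do not
 * trip unused-label warnings, while generating no code.
 */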
#define unsafe_copy_fpr_to_user(to, task, label) do { if (0) goto label; } while (0)

#define unsafe_copy_fpr_from_user(task, from, label) do { if (0) goto label; } while (0)

static inline unsigned long
copy_fpr_to_user(void __user *to, struct task_struct *task)
{
	return 0;
}

static inline unsigned long
copy_fpr_from_user(struct task_struct *task, void __user *from)
{
	return 0;
}
#endif

#ifdef CONFIG_PPC64

extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
			      struct task_struct *tsk);

#else /* CONFIG_PPC64 */

extern long sys_rt_sigreturn(void);
extern long sys_sigreturn(void);

static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
				     struct task_struct *tsk)
{
	return -EFAULT;
}

#endif /* !defined(CONFIG_PPC64) */

void signal_fault(struct task_struct *tsk, struct pt_regs *regs,
		  const char *where, void __user *ptr);

#endif /* _POWERPC_ARCH_SIGNAL_H */