/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 by Maciej W. Rozycki
 * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
 */
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>

#if LONGSIZE == 4
#define LONG_S_L swl
#define LONG_S_R swr
#else
#define LONG_S_L sdl
#define LONG_S_R sdr
#endif

#ifdef CONFIG_CPU_MICROMIPS
#define STORSIZE (LONGSIZE * 2)
#define STORMASK (STORSIZE - 1)
#define FILL64RG t8
#define FILLPTRG t7
#undef  LONG_S
#define LONG_S LONG_SP
#else
#define STORSIZE LONGSIZE
#define STORMASK LONGMASK
#define FILL64RG a1
#define FILLPTRG t0
#endif
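
/*
 * Note: with CONFIG_CPU_MICROMIPS, LONG_S is redefined to LONG_SP, which
 * resolves to a paired store (the microMIPS 'swp'/'sdp' family referenced
 * below), so each store covers two longs and STORSIZE is doubled.
 */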

#define LEGACY_MODE 1
#define EVA_MODE    2

/*
 * No need to protect it with EVA #ifdefery. The generated block of code
 * will never be assembled if EVA is not enabled.
 */
#define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr)
#define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr)
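
/*
 * The ___BUILD_EVA_INSN indirection lets the preprocessor expand the insn
 * argument (e.g. a LONG_S macro) before the "e" suffix is pasted onto it,
 * so the EVA form of the underlying instruction is generated.
 */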

#define EX(insn,reg,addr,handler)                       \
        .if \mode == LEGACY_MODE;                       \
9:              insn    reg, addr;                      \
        .else;                                          \
9:              ___BUILD_EVA_INSN(insn, reg, addr);     \
        .endif;                                         \
        .section __ex_table,"a";                        \
        PTR     9b, handler;                            \
        .previous
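
/*
 * Each EX() store also emits a __ex_table entry pairing the store at local
 * label 9 with the given fixup handler, so a fault during that store
 * transfers control to the fixup code.
 */

/* f_fill64 stores 64 bytes per invocation for every STORSIZE/LONGSIZE
 * combination selected above. */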
        .macro  f_fill64 dst, offset, val, fixup, mode
        EX(LONG_S, \val, (\offset +  0 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset +  1 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset +  2 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset +  3 * STORSIZE)(\dst), \fixup)
#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
        EX(LONG_S, \val, (\offset +  4 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset +  5 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset +  6 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset +  7 * STORSIZE)(\dst), \fixup)
#endif
#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
        EX(LONG_S, \val, (\offset +  8 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset +  9 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
#endif
        .endm

.set noreorder
.align 5

/*
 * Macro to generate the __bzero{,_user} symbol
 * Arguments:
 * mode: LEGACY_MODE or EVA_MODE
 */
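/*
 * The macro is expanded below: once in LEGACY_MODE for memset and the
 * kernel bzero, and, when CONFIG_EVA is enabled, once more in EVA_MODE for
 * the user-space __bzero variant.
 */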
.macro __BUILD_BZERO mode
/* Initialize __memset if this is the first time we call this macro */
.ifnotdef __memset
        .set    __memset, 1
.hidden __memset /* Make sure it does not leak */
.endif

        sltiu           t0, a2, STORSIZE        /* very small region? */
        bnez            t0, .Lsmall_memset\@
         andi           t0, a0, STORMASK        /* aligned? */

#ifdef CONFIG_CPU_MICROMIPS
        move            t8, a1                  /* used by 'swp' instruction */
        move            t9, a1
#endif
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        beqz            t0, 1f
         PTR_SUBU       t0, STORSIZE            /* alignment in bytes */
#else
        .set            noat
        li              AT, STORSIZE
        beqz            t0, 1f
         PTR_SUBU       t0, AT                  /* alignment in bytes */
        .set            at
#endif

#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
        R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
        EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
#else
        EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
#endif
        PTR_SUBU        a0, t0                  /* long align ptr */
        PTR_ADDU        a2, t0                  /* correct size */

#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define STORE_BYTE(N)                           \
        EX(sb, a1, N(a0), .Lbyte_fixup\@);      \
        beqz    t0, 0f;                         \
         PTR_ADDU t0, 1;

        PTR_ADDU        a2, t0                  /* correct size */
        PTR_ADDU        t0, 1
        STORE_BYTE(0)
        STORE_BYTE(1)
#if LONGSIZE == 4
        EX(sb, a1, 2(a0), .Lbyte_fixup\@)
#else
        STORE_BYTE(2)
        STORE_BYTE(3)
        STORE_BYTE(4)
        STORE_BYTE(5)
        EX(sb, a1, 6(a0), .Lbyte_fixup\@)
#endif
0:
        ori             a0, STORMASK
        xori            a0, STORMASK
        PTR_ADDIU       a0, STORSIZE
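        /* The ori/xori pair clears the low bits; together with the
         * increment this rounds a0 up to the next STORSIZE boundary,
         * past the head bytes just stored. */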
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */

1:      ori             t1, a2, 0x3f            /* # of full blocks */
        xori            t1, 0x3f
        beqz            t1, .Lmemset_partial\@  /* no block to fill */
         andi           t0, a2, 0x40-STORSIZE

        PTR_ADDU        t1, a0                  /* end address */
        .set            reorder
1:      PTR_ADDIU       a0, 64
        R10KCBARRIER(0(ra))
        f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode
        bne             t1, a0, 1b
        .set            noreorder

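        /* Partial block: compute a jump target part-way into the unrolled
         * f_fill64 body so that only the remaining whole longs (t0 bytes)
         * are stored, then finish with the sub-long tail below. */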
.Lmemset_partial\@:
        R10KCBARRIER(0(ra))
        PTR_LA          t1, 2f                  /* where to start */
#ifdef CONFIG_CPU_MICROMIPS
        LONG_SRL        t7, t0, 1
#endif
#if LONGSIZE == 4
        PTR_SUBU        t1, FILLPTRG
#else
        .set            noat
        LONG_SRL        AT, FILLPTRG, 1
        PTR_SUBU        t1, AT
        .set            at
#endif
        jr              t1
         PTR_ADDU       a0, t0                  /* dest ptr */

        .set            push
        .set            noreorder
        .set            nomacro
        /* ... but first do longs ... */
        f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode
2:      .set            pop
        andi            a2, STORMASK            /* At most one long to go */

        beqz            a2, 1f
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
         PTR_ADDU       a0, a2                  /* What's left */
        R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
        EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
#else
        EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
#endif
#else
         PTR_SUBU       t0, $0, a2
        move            a2, zero                /* No remaining longs */
        PTR_ADDIU       t0, 1
        STORE_BYTE(0)
        STORE_BYTE(1)
#if LONGSIZE == 4
        EX(sb, a1, 2(a0), .Lbyte_fixup\@)
#else
        STORE_BYTE(2)
        STORE_BYTE(3)
        STORE_BYTE(4)
        STORE_BYTE(5)
        EX(sb, a1, 6(a0), .Lbyte_fixup\@)
#endif
0:
#endif
1:      jr              ra
         move           a2, zero

.Lsmall_memset\@:
        beqz            a2, 2f
         PTR_ADDU       t1, a0, a2

1:      PTR_ADDIU       a0, 1                   /* fill bytewise */
        R10KCBARRIER(0(ra))
        bne             t1, a0, 1b
         EX(sb, a1, -1(a0), .Lsmall_fixup\@)

2:      jr              ra                      /* done */
         move           a2, zero
        .if __memset == 1
        END(memset)
        .set __memset, 0
        .hidden __memset
        .endif
#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
.Lbyte_fixup\@:
        /*
         * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
         *      a2     =             a2                -              t0                   + 1
         */
        PTR_SUBU        a2, t0
        jr              ra
         PTR_ADDIU      a2, 1
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */

.Lfirst_fixup\@:
        /* unset_bytes already in a2 */
        jr      ra
         nop

.Lfwd_fixup\@:
        /*
         * unset_bytes = partial_start_addr +  #bytes   -     fault_addr
         *      a2     =         t1         + (a2 & 3f) - $28->task->BUADDR
         */
        PTR_L           t0, TI_TASK($28)
        andi            a2, 0x3f
        LONG_L          t0, THREAD_BUADDR(t0)
        LONG_ADDU       a2, t1
        jr              ra
         LONG_SUBU      a2, t0

.Lpartial_fixup\@:
        /*
         * unset_bytes = partial_end_addr +      #bytes     -     fault_addr
         *      a2     =       a0         + (a2 & STORMASK) - $28->task->BUADDR
         */
        PTR_L           t0, TI_TASK($28)
        andi            a2, STORMASK
        LONG_L          t0, THREAD_BUADDR(t0)
        LONG_ADDU       a2, a0
        jr              ra
         LONG_SUBU      a2, t0

.Llast_fixup\@:
        /* unset_bytes already in a2 */
        jr              ra
         nop

.Lsmall_fixup\@:
        /*
         * unset_bytes = end_addr - current_addr + 1
         *      a2     =    t1    -      a0      + 1
         */
        .set            reorder
        PTR_SUBU        a2, t1, a0
        PTR_ADDIU       a2, 1
        jr              ra
        .set            noreorder

        .endm

/*
 * memset(void *s, int c, size_t n)
 *
 * a0: start of area to clear
 * a1: char to fill with
 * a2: size of area to clear
 */
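/*
 * As with the standard C memset, the original pointer (a0) is returned
 * in v0.
 */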
LEAF(memset)
EXPORT_SYMBOL(memset)
        beqz            a1, 1f
         move           v0, a0                  /* result */

        andi            a1, 0xff                /* spread fill word */
        LONG_SLL        t1, a1, 8
        or              a1, t1
        LONG_SLL        t1, a1, 16
#if LONGSIZE == 8
        or              a1, t1
        LONG_SLL        t1, a1, 32
#endif
        or              a1, t1
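        /* e.g. a fill byte of 0x5c becomes 0x5c5c5c5c, or
         * 0x5c5c5c5c5c5c5c5c when LONGSIZE == 8 */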
1:
#ifndef CONFIG_EVA
FEXPORT(__bzero)
EXPORT_SYMBOL(__bzero)
#else
FEXPORT(__bzero_kernel)
EXPORT_SYMBOL(__bzero_kernel)
#endif
        __BUILD_BZERO LEGACY_MODE

#ifdef CONFIG_EVA
LEAF(__bzero)
EXPORT_SYMBOL(__bzero)
        __BUILD_BZERO EVA_MODE
END(__bzero)
#endif