/* SPDX-License-Identifier: GPL-2.0-only */

#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/alternative-macros.h>
#include <asm/hwcap.h>

/* int strncmp(const char *cs, const char *ct, size_t count) */
SYM_FUNC_START(strncmp)

	/*
	 * Patched at boot: if the CPU implements Zbb (and the kernel was
	 * built with CONFIG_RISCV_ISA_ZBB), replace the nop with a jump
	 * to the word-at-a-time variant below.
	 */
	ALTERNATIVE("nop", "j strncmp_zbb", 0, RISCV_ISA_EXT_ZBB, CONFIG_RISCV_ISA_ZBB)

	/*
	 * Returns
	 *   a0 - comparison result, value like strncmp
	 *
	 * Parameters
	 *   a0 - string1
	 *   a1 - string2
	 *   a2 - number of characters to compare
	 *
	 * Clobbers
	 *   t0, t1, t2
	 */
	li	t2, 0			/* t2 = number of bytes compared so far */
1:
	beq	a2, t2, 2f		/* compared 'count' bytes -> equal */
	lbu	t0, 0(a0)
	lbu	t1, 0(a1)
	addi	a0, a0, 1
	addi	a1, a1, 1
	bne	t0, t1, 3f		/* bytes differ -> compute sign */
	addi	t2, t2, 1
	bnez	t0, 1b			/* both NUL (equal here) -> strings equal */
2:
	li	a0, 0
	ret
3:
	/*
	 * strncmp only needs to return (< 0, 0, > 0) values
	 * not necessarily -1, 0, +1
	 */
	sub	a0, t0, t1
	ret

/*
 * Variant of strncmp using the ZBB extension if available.
 *
 * Fast path compares SZREG-sized aligned words per iteration, using
 * orc.b to detect a NUL byte anywhere in a word (orc.b yields all-ones
 * only when every byte is non-zero). Both source words are checked for
 * NUL, and the fast path stops at the last full word before the
 * 'count' limit (bge against the rounded-down limit), with the tail
 * handled by the byte loop — see kernel commit "riscv, lib: Fix Zbb
 * strncmp" for why both are required.
 */
#ifdef CONFIG_RISCV_ISA_ZBB
strncmp_zbb:

.option push
.option arch,+zbb

	/*
	 * Returns
	 *   a0 - comparison result, like strncmp
	 *
	 * Parameters
	 *   a0 - string1
	 *   a1 - string2
	 *   a2 - number of characters to compare
	 *
	 * Clobbers
	 *   t0, t1, t2, t3, t4, t5, t6
	 */
	or	t2, a0, a1		/* t2 = combined low bits of both pointers */
	li	t5, -1			/* all-ones: orc.b result when no NUL byte */
	and	t2, t2, SZREG-1
	add	t4, a0, a2		/* t4 = one past the last byte to compare */
	bnez	t2, 3f			/* either string misaligned -> byte loop */

	/* Adjust limit for fast-path: last aligned word boundary <= t4. */
	andi	t6, t4, -SZREG

	/* Main loop for aligned strings. */
	.p2align 3
1:
	/*
	 * Must be bge, not bgt: when fewer than SZREG bytes remain the
	 * fast path must not read a full word past the limit; the byte
	 * loop finishes the tail.
	 */
	bge	a0, t6, 3f
	REG_L	t0, 0(a0)
	REG_L	t1, 0(a1)
	orc.b	t3, t0
	bne	t3, t5, 2f		/* NUL byte somewhere in string1's word */
	orc.b	t3, t1
	bne	t3, t5, 2f		/* NUL byte somewhere in string2's word */
	addi	a0, a0, SZREG
	addi	a1, a1, SZREG
	beq	t0, t1, 1b

	/*
	 * Words don't match, and no null byte in either
	 * word. Get bytes in big-endian order and compare.
	 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	rev8	t0, t0
	rev8	t1, t1
#endif

	/* Synthesize (t0 >= t1) ? 1 : -1 in a branchless sequence. */
	sltu	a0, t0, t1
	neg	a0, a0
	ori	a0, a0, 1
	ret

2:
	/*
	 * Found a null byte.
	 * If words don't match, fall back to simple loop.
	 */
	bne	t0, t1, 3f

	/* Otherwise, strings are equal. */
	li	a0, 0
	ret

	/* Simple loop for misaligned strings and the fast-path tail. */
	.p2align 3
3:
	bge	a0, t4, 5f		/* reached the 'count' limit -> equal */
	lbu	t0, 0(a0)
	lbu	t1, 0(a1)
	addi	a0, a0, 1
	addi	a1, a1, 1
	bne	t0, t1, 4f
	bnez	t0, 3b

4:
	sub	a0, t0, t1
	ret

5:
	li	a0, 0
	ret

.option pop
#endif
SYM_FUNC_END(strncmp)