From a1ae431705410fc7092790977bffd1b00c63c229 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Wed, 9 Mar 2022 08:56:14 +0100
Subject: [PATCH] powerpc: Use rol32() instead of opencoding in csum_fold()

rol32(x, 16) will do the rotate using rlwinm.
No need to open code using inline assembly.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/794337eff7bb803d2c4e67d9eee635390c4c48fe.1646812553.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/include/asm/checksum.h | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 8321f6053a67..4b573a3b7e17 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -38,14 +38,15 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
  */
 static inline __sum16 csum_fold(__wsum sum)
 {
-	unsigned int tmp;
+	u32 tmp = (__force u32)sum;
 
-	/* swap the two 16-bit halves of sum */
-	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
-	/* if there is a carry from adding the two 16-bit halves,
-	   it will carry from the lower half into the upper half,
-	   giving us the correct sum in the upper half. */
-	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
+	/*
+	 * swap the two 16-bit halves of sum
+	 * if there is a carry from adding the two 16-bit halves,
+	 * it will carry from the lower half into the upper half,
+	 * giving us the correct sum in the upper half.
+	 */
+	return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
 }
 
 static inline u32 from64to32(u64 x)
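
Not part of the patch: the sketch below is a minimal userspace illustration of the rotate-and-add fold the new code relies on, for readers less familiar with ones-complement checksums. The rol32() helper, csum_fold_ref() and the sample values are local stand-ins written for this example only; in the kernel, rol32() is the generic bitops helper and the real function uses the __wsum/__sum16 types with __force casts.

/* Userspace sketch of the csum_fold() trick, not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's rol32(): rotate a 32-bit word left. */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
}

/*
 * Adding rol32(tmp, 16) to tmp puts (lower half + upper half), including
 * any carry between them, into the upper 16 bits; invert and shift down
 * to get the folded 16-bit checksum.
 */
static inline uint16_t csum_fold(uint32_t sum)
{
	uint32_t tmp = sum;

	return (uint16_t)(~(tmp + rol32(tmp, 16)) >> 16);
}

/* Reference fold: add the carry back in until none is left, then invert. */
static uint16_t csum_fold_ref(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t samples[] = { 0, 1, 0xffff, 0x10000, 0x1fffe, 0xdeadbeef, 0xffffffff };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08x -> 0x%04x (ref 0x%04x)\n", samples[i],
		       csum_fold(samples[i]), csum_fold_ref(samples[i]));
	return 0;
}

Both columns of the output agree for every sample, which is the point of the patch: a plain rotate (rlwinm on powerpc, emitted by the compiler for rol32()) is enough, so the inline assembly is unnecessary.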