linux/arch/x86/lib/csum-partial_64.c
Noah Goldstein 5d4acb6285 x86/csum: Remove unnecessary odd handling
The special case for odd-aligned buffers is unnecessary and mostly
just adds overhead. Aligned buffers are the expectation, and even for
unaligned buffers, the only case that was helped is a buffer that is
1 byte off from word alignment, which is ~1/7 of the cases. Overall it
seems highly unlikely to be worth the extra branch.

It was left in the previous perf improvement patch because I was
erroneously comparing the exact output of `csum_partial(...)`, but
really we only need `csum_fold(csum_partial(...))` to match, so it's
safe to remove.
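
As a rough illustration of why only the folded value matters (a hedged,
user-space sketch with made-up helper names, not the kernel's code or
types): csum_fold() collapses the 32-bit running sum to 16 bits with
end-around carry, so two partial sums that differ as 32-bit values can
still produce the identical checksum.

#include <assert.h>
#include <stdint.h>

/* Portable stand-in for csum_fold(): fold to 16 bits, then invert. */
static uint16_t fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add high half into low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return (uint16_t)~sum;
}

int main(void)
{
	/* Distinct 32-bit sums, both multiples of 0xffff: same checksum. */
	assert(fold32(0x0001fffe) == fold32(0x00f0ff0f));
	return 0;
}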

All csum kunit tests pass.

Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Laight <david.laight@aculab.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2024-01-04 15:33:14 -08:00

// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86_64/lib/csum-partial.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner due to speed.
 */

#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>
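
/*
 * Fold the 64-bit accumulator down to 32 bits: adding the value to a
 * copy of itself rotated by 32 leaves lo + hi (plus the carry out of
 * the low half) in the upper 32 bits, which the final shift extracts.
 */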
static inline __wsum csum_finalize_sum(u64 temp64)
{
	return (__force __wsum)((temp64 + ror64(temp64, 32)) >> 32);
}

/*
 * Do a checksum on an arbitrary memory area.
 * Returns a 32-bit checksum.
 *
 * This isn't as time critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Still, with CHECKSUM_COMPLETE this is called to compute
 * checksums on IPv6 headers (40 bytes) and other small parts.
 * It's best to have buff aligned on a 64-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	u64 temp64 = (__force u64)sum;

	/*
	 * len == 40 is the hot case due to IPv6 headers, but annotating it likely()
	 * has a noticeable negative effect on codegen for all other cases with
	 * minimal performance benefit here.
	 */
	if (len == 40) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq 4*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[40])buff));
		return csum_finalize_sum(temp64);
	}
	if (unlikely(len >= 64)) {
		/*
		 * Extra accumulators for better ILP in the loop.
		 */
		u64 tmp_accum, tmp_carries;
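
		/*
		 * The two "adcl $0" instructions bank the carry-out of each
		 * four-quadword chain into tmp_carries, so the %[res] and
		 * %[tmp_accum] chains can run independently per iteration
		 * and be recombined once after the loop.
		 */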
asm("xorl %k[tmp_accum],%k[tmp_accum]\n\t"
"xorl %k[tmp_carries],%k[tmp_carries]\n\t"
"subl $64, %[len]\n\t"
"1:\n\t"
"addq 0*8(%[src]),%[res]\n\t"
"adcq 1*8(%[src]),%[res]\n\t"
"adcq 2*8(%[src]),%[res]\n\t"
"adcq 3*8(%[src]),%[res]\n\t"
"adcl $0,%k[tmp_carries]\n\t"
"addq 4*8(%[src]),%[tmp_accum]\n\t"
"adcq 5*8(%[src]),%[tmp_accum]\n\t"
"adcq 6*8(%[src]),%[tmp_accum]\n\t"
"adcq 7*8(%[src]),%[tmp_accum]\n\t"
"adcl $0,%k[tmp_carries]\n\t"
"addq $64, %[src]\n\t"
"subl $64, %[len]\n\t"
"jge 1b\n\t"
"addq %[tmp_accum],%[res]\n\t"
"adcq %[tmp_carries],%[res]\n\t"
"adcq $0,%[res]"
: [tmp_accum] "=&r"(tmp_accum),
[tmp_carries] "=&r"(tmp_carries), [res] "+r"(temp64),
[len] "+r"(len), [src] "+r"(buff)
: "m"(*(const char *)buff));
}
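	/*
	 * If the loop above ran, len is now negative (leftover - 64), but
	 * its low six bits still equal the number of leftover bytes, so
	 * the power-of-two masks below test the right values either way.
	 */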
	if (len & 32) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[32])buff));
		buff += 32;
	}
	if (len & 16) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[16])buff));
		buff += 16;
	}
	if (len & 8) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[8])buff));
		buff += 8;
	}
	if (len & 7) {
		unsigned int shift = (-len << 3) & 63;
		unsigned long trail;
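
		/*
		 * load_unaligned_zeropad() reads a full 8 bytes, relying on
		 * an exception fixup to zero-fill if the access runs into an
		 * unmapped page; the shift pair then clears the bytes beyond
		 * the remaining length (on little-endian the valid bytes sit
		 * in the low-order bits).
		 */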
		trail = (load_unaligned_zeropad(buff) << shift) >> shift;
		asm("addq %[trail],%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [trail] "r"(trail));
	}
	return csum_finalize_sum(temp64);
}
EXPORT_SYMBOL(csum_partial);

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);