bce5a1e8a3
When building ARCH=i386 with CONFIG_LTO_CLANG_FULL=y, it's possible
(depending on additional configs which I have not been able to isolate)
to observe a failure during register allocation:

  error: inline assembly requires more registers than available

when memmove is inlined into tcp_v4_fill_cb() or tcp_v6_fill_cb().

memmove is quite large and probably shouldn't be inlined due to size
alone. A noinline function attribute would be the simplest fix, but
there are a few things that stand out with the current definition:

In addition to having complex constraints that can't always be resolved,
the clobber list seems to be missing %bx. By using numbered operands
rather than symbolic operands, the constraints are quite obnoxious to
refactor.

Having a large function be 99% inline asm is a code smell that this
function should simply be written in stand-alone out-of-line assembler.
Moving this to out-of-line assembler guarantees that the compiler cannot
inline calls to memmove.

This has been done previously for 64-bit:
commit 9599ec0471de ("x86-64, mem: Convert memmove() to assembly file
and fix return value bug")

That gives the opportunity for other cleanups like fixing the
inconsistent use of tabs vs. spaces and instruction suffixes, and the
label 3 appearing twice. Symbolic operands, local labels, and additional
comments give this code a fresh coat of paint.

Finally, add a test that tickles the `rep movsl` implementation to test
it for correctness, since it has implicit operands.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Suggested-by: David Laight <David.Laight@aculab.com>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Kees Cook <keescook@chromium.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Link: https://lore.kernel.org/all/20221018172155.287409-1-ndesaulniers%40google.com
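
Two short illustrations, neither taken from the patch itself:

The operand-naming complaint is easiest to see side by side. With numbered
operands, every reference has to be counted off against the constraint list,
so adding or removing a constraint renumbers everything; symbolic operands
are named at the use site. A toy example (hypothetical helpers, not the old
memmove code):

	static inline unsigned int add_numbered(unsigned int a, unsigned int b)
	{
		/* %0/%1 are positional; reordering the constraints breaks the template. */
		asm("addl %1, %0" : "+r" (a) : "r" (b));
		return a;
	}

	static inline unsigned int add_symbolic(unsigned int a, unsigned int b)
	{
		/* [a]/[b] are self-describing and survive refactoring. */
		asm("addl %[b], %[a]" : [a] "+r" (a) : [b] "r" (b));
		return a;
	}

As for the test: the `rep movsl` paths below are only reached for copies of
at least 680 bytes whose src and dest agree in their low address byte, so a
test has to construct overlap in both directions at such an offset. A
stand-alone userspace sketch of the idea (buffer sizes, names, and the use
of the C library's memmove are illustrative only; the patch adds an
in-kernel test):

	#include <assert.h>
	#include <stddef.h>
	#include <string.h>

	#define N	2048	/* >= 680, so the movs paths are eligible */
	#define OFF	256	/* (src ^ dest) & 0xff == 0 at this offset */

	/* A pattern whose period exceeds the buffer, so shifted data is detectable. */
	static unsigned char pat(size_t i)
	{
		return (unsigned char)(i ^ (i >> 8));
	}

	int main(void)
	{
		static unsigned char buf[N + OFF];
		size_t i;

		/* Forward overlapping copy: dest < src. */
		for (i = 0; i < N + OFF; i++)
			buf[i] = pat(i);
		memmove(buf, buf + OFF, N);
		for (i = 0; i < N; i++)
			assert(buf[i] == pat(i + OFF));

		/* Backward overlapping copy: dest > src. */
		for (i = 0; i < N + OFF; i++)
			buf[i] = pat(i);
		memmove(buf + OFF, buf, N);
		for (i = 0; i < N; i++)
			assert(buf[OFF + i] == pat(i));

		return 0;
	}
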
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/linkage.h>
#include <asm/export.h>

SYM_FUNC_START(memmove)
/*
 * void *memmove(void *dest_in, const void *src_in, size_t n)
 * -mregparm=3 passes these in registers:
 * dest_in: %eax
 * src_in:  %edx
 * n:       %ecx
 * See also arch/x86/entry/calling.h for a description of the calling
 * convention.
 *
 * n can remain in %ecx, but for `rep movsl`, we'll need dest in %edi and src
 * in %esi.
 */
.set dest_in, %eax
.set dest, %edi
.set src_in, %edx
.set src, %esi
.set n, %ecx
.set tmp0, %edx
.set tmp0w, %dx
.set tmp1, %ebx
.set tmp1w, %bx
.set tmp2, %eax
.set tmp3b, %cl
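
/*
 * Note that tmp0/tmp0w alias src_in (%edx) and tmp2 aliases dest_in (%eax):
 * they are only safe to use as scratch once the inputs have been copied to
 * src/dest below. tmp1/tmp1w live in %ebx, the callee-saved register whose
 * clobber the old inline-asm version was missing.
 */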

/*
 * Save all callee-saved registers, because this function is going to clobber
 * all of them:
 */
	pushl	%ebp
	movl	%esp, %ebp	// set standard frame pointer

	pushl	%ebx
	pushl	%edi
	pushl	%esi
	pushl	%eax		// save 'dest_in' parameter [eax] as the return value

	movl	src_in, src
	movl	dest_in, dest

	/* Copies of at least 16 bytes are handled by the loops below. */
	cmpl	$0x10, n
	jb	.Lmove_16B

	/*
	 * Decide between forward and backward copy. When dest lies above
	 * src, the regions can overlap such that copying forwards would
	 * trample not-yet-copied source bytes, so copy backwards instead.
	 */
	cmpl	dest, src
	jb	.Lbackwards_header

	/*
	 * The movs instruction has a high startup latency, so sizes below
	 * 680 bytes are handled with general-purpose register moves instead.
	 */
	cmpl	$680, n
	jb	.Ltoo_small_forwards

	/* movs is only a win when src and dest share low-byte alignment. */
	movl	src, tmp0
	xorl	dest, tmp0
	andl	$0xff, tmp0
	jz	.Lforward_movs
.Ltoo_small_forwards:
	subl	$0x10, n
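
	/*
	 * n has just been biased by -0x10 so that the subl at the top of
	 * the loop below borrows (sets CF) once fewer than 16 bytes will
	 * remain after the current chunk. movl/leal leave EFLAGS untouched,
	 * so the borrow survives to the jae, and the addl $0x10 after the
	 * loop restores the remaining count (0-15).
	 */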

	/* We gobble 16 bytes forward in each loop. */
.Lmove_16B_forwards_loop:
	subl	$0x10, n
	movl	0*4(src), tmp0
	movl	1*4(src), tmp1
	movl	tmp0, 0*4(dest)
	movl	tmp1, 1*4(dest)
	movl	2*4(src), tmp0
	movl	3*4(src), tmp1
	movl	tmp0, 2*4(dest)
	movl	tmp1, 3*4(dest)
	leal	0x10(src), src
	leal	0x10(dest), dest
	jae	.Lmove_16B_forwards_loop
	addl	$0x10, n
	jmp	.Lmove_16B

	/* Handle data forward by movs. */
.p2align 4
.Lforward_movs:
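	/*
	 * Stash the source's final dword and the address of the dest's
	 * final dword up front: rep movsl (implicit operands %ecx, %esi,
	 * %edi) copies n/4 dwords, and the trailing store of tmp0 into
	 * (tmp1) then covers the 1-3 bytes left over when n is not a
	 * multiple of 4.
	 */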
	movl	-4(src, n), tmp0
	leal	-4(dest, n), tmp1
	shrl	$2, n
	rep movsl
	movl	tmp0, (tmp1)
	jmp	.Ldone

	/* Handle data backward by movs. */
.p2align 4
.Lbackwards_movs:
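	/*
	 * Stash the source's first dword and the dest base address, then
	 * point src/dest at the last dword of each buffer. std makes
	 * movsl walk downwards, so rep movsl copies n/4 dwords from high
	 * to low addresses; the final store of tmp0 covers the leftover
	 * low bytes, and cld restores the direction flag (the kernel
	 * assumes DF is clear).
	 */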
	movl	(src), tmp0
	movl	dest, tmp1
	leal	-4(src, n), src
	leal	-4(dest, n), dest
	shrl	$2, n
	std
	rep movsl
	movl	tmp0, (tmp1)
	cld
	jmp	.Ldone

	/* Start to prepare for backward copy. */
.p2align 4
.Lbackwards_header:
	cmpl	$680, n
	jb	.Ltoo_small_backwards
	movl	src, tmp0
	xorl	dest, tmp0
	andl	$0xff, tmp0
	jz	.Lbackwards_movs

	/* Advance both pointers past the copy's end, then copy backwards. */
.Ltoo_small_backwards:
	addl	n, src
	addl	n, dest
	subl	$0x10, n
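
	/*
	 * Same -0x10 bias trick as the forward loop: the subl at the top
	 * of the loop below borrows once fewer than 16 bytes remain, and
	 * the addl $0x10 after it restores the remaining count.
	 */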

	/* We gobble 16 bytes backward in each loop. */
.Lmove_16B_backwards_loop:
	subl	$0x10, n

	movl	-1*4(src), tmp0
	movl	-2*4(src), tmp1
	movl	tmp0, -1*4(dest)
	movl	tmp1, -2*4(dest)
	movl	-3*4(src), tmp0
	movl	-4*4(src), tmp1
	movl	tmp0, -3*4(dest)
	movl	tmp1, -4*4(dest)
	leal	-0x10(src), src
	leal	-0x10(dest), dest
	jae	.Lmove_16B_backwards_loop

	/* Point src and dest at the head of the remaining 0-15 bytes. */
	addl	$0x10, n
	subl	n, src
	subl	n, dest

	/* Move data from 8 bytes to 15 bytes. */
.p2align 4
.Lmove_16B:
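	/*
	 * The head pair and tail pair of dwords may overlap for n < 16,
	 * but all four loads happen before any store, so overlapping
	 * buffers are still handled correctly. src is dead after its
	 * final load and is reused as a fourth temporary.
	 */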
	cmpl	$8, n
	jb	.Lmove_8B
	movl	0*4(src), tmp0
	movl	1*4(src), tmp1
	movl	-2*4(src, n), tmp2
	movl	-1*4(src, n), src

	movl	tmp0, 0*4(dest)
	movl	tmp1, 1*4(dest)
	movl	tmp2, -2*4(dest, n)
	movl	src, -1*4(dest, n)
	jmp	.Ldone

	/* Move data from 4 bytes to 7 bytes. */
.p2align 4
.Lmove_8B:
	cmpl	$4, n
	jb	.Lmove_4B
	movl	0*4(src), tmp0
	movl	-1*4(src, n), tmp1
	movl	tmp0, 0*4(dest)
	movl	tmp1, -1*4(dest, n)
	jmp	.Ldone

	/* Move data from 2 bytes to 3 bytes. */
.p2align 4
.Lmove_4B:
	cmpl	$2, n
	jb	.Lmove_1B
	movw	0*2(src), tmp0w
	movw	-1*2(src, n), tmp1w
	movw	tmp0w, 0*2(dest)
	movw	tmp1w, -1*2(dest, n)
	jmp	.Ldone

	/* Move data for 1 byte. */
.p2align 4
.Lmove_1B:
	cmpl	$1, n
	jb	.Ldone
	movb	(src), tmp3b
	movb	tmp3b, (dest)

.p2align 4
.Ldone:
	popl	dest_in		// restore 'dest_in' [eax] as the return value

	/* Restore all callee-saved registers: */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	RET
SYM_FUNC_END(memmove)
EXPORT_SYMBOL(memmove)