linux/arch/x86/lib/getuser.S
Qiuxu Zhuo 8eed4e00a3 x86/lib: Revert to _ASM_EXTABLE_UA() for {get,put}_user() fixups
During memory error injection testing on kernels >= v6.4, the kernel
panics as shown below. The issue could not be reproduced on kernels <= v6.3.

  mce: [Hardware Error]: CPU 296: Machine Check Exception: f Bank 1: bd80000000100134
  mce: [Hardware Error]: RIP 10:<ffffffff821b9776> {__get_user_nocheck_4+0x6/0x20}
  mce: [Hardware Error]: TSC 411a93533ed ADDR 346a8730040 MISC 86
  mce: [Hardware Error]: PROCESSOR 0:a06d0 TIME 1706000767 SOCKET 1 APIC 211 microcode 80001490
  mce: [Hardware Error]: Run the above through 'mcelog --ascii'
  mce: [Hardware Error]: Machine check: Data load in unrecoverable area of kernel
  Kernel panic - not syncing: Fatal local machine check

The MCA code can recover from an in-kernel #MC if the fixup type is
EX_TYPE_UACCESS, explicitly indicating that the kernel is attempting to
access userspace memory. However, if the fixup type is EX_TYPE_DEFAULT
the only thing that is raised for an in-kernel #MC is a panic.
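
To see why the fixup type matters, here is a condensed, illustrative
sketch (not the verbatim kernel code) of how the severity grading
consults the exception table entry for the faulting RIP; the shape
follows error_context() in arch/x86/kernel/cpu/mce/severity.c:

  /* Condensed sketch of error_context(); most cases omitted. */
  static enum context error_context(struct mce *m, struct pt_regs *regs)
  {
          switch (ex_get_fixup_type(regs->ip)) {
          case EX_TYPE_UACCESS:           /* kernel access to user memory */
                  m->kflags |= MCE_IN_KERNEL_RECOV;
                  return IN_KERNEL_RECOV; /* recover via the fixup */
          default:                        /* EX_TYPE_DEFAULT, among others */
                  return IN_KERNEL;       /* in-kernel #MC -> panic */
          }
  }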

ex_handler_uaccess() would warn if a user passed a non-canonical address
(with bit 63 clear) to {get,put}_user(), which was unexpected.

Therefore, commit

  b19b74bc99 ("x86/mm: Rework address range check in get_user() and put_user()")

replaced _ASM_EXTABLE_UA() with _ASM_EXTABLE() for {get,put}_user()
fixups. However, the new fixup type EX_TYPE_DEFAULT results in a panic.
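
The two macro families differ only in the type they record in the
exception table entry (paraphrased from arch/x86/include/asm/asm.h):

  #define _ASM_EXTABLE(from, to) \
          _ASM_EXTABLE_TYPE(from, to, EX_TYPE_DEFAULT)

  #define _ASM_EXTABLE_UA(from, to) \
          _ASM_EXTABLE_TYPE(from, to, EX_TYPE_UACCESS)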

Commit

  6014bc2756 ("x86-64: make access_ok() independent of LAM")

added the check gp_fault_address_ok() right before the WARN_ONCE() in
ex_handler_uaccess() to not warn about non-canonical user addresses due
to LAM.
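
Roughly, that check suppresses the warning whenever the faulting
address is a plausible user pointer once LAM tag bits are taken into
account (a paraphrase of gp_fault_address_ok() in
arch/x86/mm/extable.c; details may differ):

  static bool gp_fault_address_ok(unsigned long fault_address)
  {
  #ifdef CONFIG_X86_64
          /* In the "user space" half of the non-canonical space? */
          if (valid_user_address(fault_address))
                  return true;

          /* .. or just above it? */
          fault_address -= (UL(1) << 48);
          if (valid_user_address(fault_address))
                  return true;
  #endif
          return false;
  }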

With that in place, revert to _ASM_EXTABLE_UA() for {get,put}_user()
exception fixups so that in-kernel MCEs are handled correctly again.

  [ bp: Massage commit message. ]

Fixes: b19b74bc99 ("x86/mm: Rework address range check in get_user() and put_user()")
Signed-off-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: <stable@kernel.org>
Link: https://lore.kernel.org/r/20240129063842.61584-1-qiuxu.zhuo@intel.com
2024-01-29 11:40:41 +01:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * __get_user functions.
 *
 * (C) Copyright 1998 Linus Torvalds
 * (C) Copyright 2005 Andi Kleen
 * (C) Copyright 2008 Glauber Costa
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they
 * return an error value in addition to the "real"
 * return value.
 */

/*
 * __get_user_X
 *
 * Inputs:	%[r|e]ax contains the address.
 *
 * Outputs:	%[r|e]ax is error code (0 or -EFAULT)
 *		%[r|e]dx contains zero-extended value
 *		%ecx contains the high half for 32-bit __get_user_8
 *
 * These functions should not modify any other registers,
 * as they get called from within inline assembly.
 */
#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
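
/*
 * Speculation barrier: emit LFENCE where it is architecturally
 * serializing (X86_FEATURE_LFENCE_RDTSC).  Used by the
 * __get_user_nocheck_* variants below, which do no address masking
 * of their own.
 */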
#define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC
.macro check_range size:req
.if IS_ENABLED(CONFIG_X86_64)
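	/*
	 * Smear the sign bit of the address across %rdx: zero for a
	 * user address, all-ones otherwise.  The OR then either leaves
	 * a user address untouched or turns the address into all-ones,
	 * which is non-canonical and faults on access.
	 */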
	mov %rax, %rdx
	sar $63, %rdx
	or %rdx, %rax
.else
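	/*
	 * Addresses above the limit are rejected.  For valid addresses,
	 * the CMP leaves the carry flag set, which SBB smears into an
	 * all-ones mask, so a mispredicted JAE cannot speculatively
	 * dereference an out-of-range address.
	 */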
	cmp $TASK_SIZE_MAX-\size+1, %eax
	jae .Lbad_get_user
	sbb %edx, %edx		/* array_index_mask_nospec() */
	and %edx, %eax
.endif
.endm
.text
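
/*
 * The numbered labels on the memory accesses below pair with the
 * _ASM_EXTABLE_UA() entries at the end of this file, so a #PF (or a
 * recoverable #MC) on any of these loads is redirected to the fixup
 * code instead of being fatal.
 */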
SYM_FUNC_START(__get_user_1)
	check_range size=1
	ASM_STAC
1:	movzbl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)

SYM_FUNC_START(__get_user_2)
	check_range size=2
	ASM_STAC
2:	movzwl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2)

SYM_FUNC_START(__get_user_4)
	check_range size=4
	ASM_STAC
3:	movl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)

SYM_FUNC_START(__get_user_8)
	check_range size=8
	ASM_STAC
#ifdef CONFIG_X86_64
4:	movq (%_ASM_AX),%rdx
#else
4:	movl (%_ASM_AX),%edx
5:	movl 4(%_ASM_AX),%ecx
#endif
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
/* .. and the same for __get_user, just without the range checks */
SYM_FUNC_START(__get_user_nocheck_1)
	ASM_STAC
	ASM_BARRIER_NOSPEC
6:	movzbl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_nocheck_1)
EXPORT_SYMBOL(__get_user_nocheck_1)

SYM_FUNC_START(__get_user_nocheck_2)
	ASM_STAC
	ASM_BARRIER_NOSPEC
7:	movzwl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_nocheck_2)
EXPORT_SYMBOL(__get_user_nocheck_2)

SYM_FUNC_START(__get_user_nocheck_4)
	ASM_STAC
	ASM_BARRIER_NOSPEC
8:	movl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_nocheck_4)
EXPORT_SYMBOL(__get_user_nocheck_4)

SYM_FUNC_START(__get_user_nocheck_8)
	ASM_STAC
	ASM_BARRIER_NOSPEC
#ifdef CONFIG_X86_64
9:	movq (%_ASM_AX),%rdx
#else
9:	movl (%_ASM_AX),%edx
10:	movl 4(%_ASM_AX),%ecx
#endif
	xor %eax,%eax
	ASM_CLAC
	RET
SYM_FUNC_END(__get_user_nocheck_8)
EXPORT_SYMBOL(__get_user_nocheck_8)
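
/*
 * Common fixup targets: clear the output register(s) and return
 * -EFAULT in %[r|e]ax.  .Lbad_get_user is also reached directly from
 * the 32-bit check_range when the range check fails.
 */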
SYM_CODE_START_LOCAL(__get_user_handle_exception)
	ASM_CLAC
.Lbad_get_user:
	xor %edx,%edx
	mov $(-EFAULT),%_ASM_AX
	RET
SYM_CODE_END(__get_user_handle_exception)

#ifdef CONFIG_X86_32
SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
	ASM_CLAC
bad_get_user_8:
	xor %edx,%edx
	xor %ecx,%ecx
	mov $(-EFAULT),%_ASM_AX
	RET
SYM_CODE_END(__get_user_8_handle_exception)
#endif
/* get_user */
	_ASM_EXTABLE_UA(1b, __get_user_handle_exception)
	_ASM_EXTABLE_UA(2b, __get_user_handle_exception)
	_ASM_EXTABLE_UA(3b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
	_ASM_EXTABLE_UA(4b, __get_user_handle_exception)
#else
	_ASM_EXTABLE_UA(4b, __get_user_8_handle_exception)
	_ASM_EXTABLE_UA(5b, __get_user_8_handle_exception)
#endif

/* __get_user */
	_ASM_EXTABLE_UA(6b, __get_user_handle_exception)
	_ASM_EXTABLE_UA(7b, __get_user_handle_exception)
	_ASM_EXTABLE_UA(8b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
	_ASM_EXTABLE_UA(9b, __get_user_handle_exception)
#else
	_ASM_EXTABLE_UA(9b, __get_user_8_handle_exception)
	_ASM_EXTABLE_UA(10b, __get_user_8_handle_exception)
#endif
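
/*
 * All of the above fixups are registered with _ASM_EXTABLE_UA(), i.e.
 * tagged EX_TYPE_UACCESS, so the machine check code can treat a fault
 * in these user accesses as recoverable rather than fatal.
 */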