commit 64dad8e49d
Merge remote-tracking branch 'arm64/for-next/uaccess' into HEAD
Signed-off-by: Marc Zyngier <maz@kernel.org>
@ -195,7 +195,6 @@ config ARM64
select PCI_SYSCALL if PCI
select POWER_RESET
select POWER_SUPPLY
select SET_FS
select SPARSE_IRQ
select SWIOTLB
select SYSCTL_EXCEPTION_TRACE
@ -1388,6 +1387,9 @@ config ARM64_PAN
The feature is detected at runtime, and will remain as a 'nop'
instruction if the cpu does not implement the feature.

config AS_HAS_LDAPR
def_bool $(as-instr,.arch_extension rcpc)

config ARM64_LSE_ATOMICS
bool
default ARM64_USE_LSE_ATOMICS
@ -1425,27 +1427,6 @@ endmenu
|
||||
|
||||
menu "ARMv8.2 architectural features"
|
||||
|
||||
config ARM64_UAO
|
||||
bool "Enable support for User Access Override (UAO)"
|
||||
default y
|
||||
help
|
||||
User Access Override (UAO; part of the ARMv8.2 Extensions)
|
||||
causes the 'unprivileged' variant of the load/store instructions to
|
||||
be overridden to be privileged.
|
||||
|
||||
This option changes get_user() and friends to use the 'unprivileged'
|
||||
variant of the load/store instructions. This ensures that user-space
|
||||
really did have access to the supplied memory. When addr_limit is
|
||||
set to kernel memory the UAO bit will be set, allowing privileged
|
||||
access to kernel memory.
|
||||
|
||||
Choosing this option will cause copy_to_user() et al to use user-space
|
||||
memory permissions.
|
||||
|
||||
The feature is detected at runtime, the kernel will use the
|
||||
regular load/store instructions if the cpu does not implement the
|
||||
feature.
|
||||
|
||||
config ARM64_PMEM
|
||||
bool "Enable support for persistent memory"
|
||||
select ARCH_HAS_PMEM_API
|
||||
|
arch/arm64/include/asm/alternative-macros.h (new file, 217 lines)
@ -0,0 +1,217 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __ASM_ALTERNATIVE_MACROS_H
|
||||
#define __ASM_ALTERNATIVE_MACROS_H
|
||||
|
||||
#include <asm/cpucaps.h>
|
||||
|
||||
#define ARM64_CB_PATCH ARM64_NCAPS
|
||||
|
||||
/* A64 instructions are always 32 bits. */
|
||||
#define AARCH64_INSN_SIZE 4
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/stringify.h>
|
||||
|
||||
#define ALTINSTR_ENTRY(feature) \
|
||||
" .word 661b - .\n" /* label */ \
|
||||
" .word 663f - .\n" /* new instruction */ \
|
||||
" .hword " __stringify(feature) "\n" /* feature bit */ \
|
||||
" .byte 662b-661b\n" /* source len */ \
|
||||
" .byte 664f-663f\n" /* replacement len */
|
||||
|
||||
#define ALTINSTR_ENTRY_CB(feature, cb) \
|
||||
" .word 661b - .\n" /* label */ \
|
||||
" .word " __stringify(cb) "- .\n" /* callback */ \
|
||||
" .hword " __stringify(feature) "\n" /* feature bit */ \
|
||||
" .byte 662b-661b\n" /* source len */ \
|
||||
" .byte 664f-663f\n" /* replacement len */
|
||||
|
||||
/*
|
||||
* alternative assembly primitive:
|
||||
*
|
||||
* If any of these .org directive fail, it means that insn1 and insn2
|
||||
* don't have the same length. This used to be written as
|
||||
*
|
||||
* .if ((664b-663b) != (662b-661b))
|
||||
* .error "Alternatives instruction length mismatch"
|
||||
* .endif
|
||||
*
|
||||
* but most assemblers die if insn1 or insn2 have a .inst. This should
|
||||
* be fixed in a binutils release posterior to 2.25.51.0.2 (anything
|
||||
* containing commit 4e4d08cf7399b606 or c1baaddf8861).
|
||||
*
|
||||
* Alternatives with callbacks do not generate replacement instructions.
|
||||
*/
|
||||
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
|
||||
".if "__stringify(cfg_enabled)" == 1\n" \
|
||||
"661:\n\t" \
|
||||
oldinstr "\n" \
|
||||
"662:\n" \
|
||||
".pushsection .altinstructions,\"a\"\n" \
|
||||
ALTINSTR_ENTRY(feature) \
|
||||
".popsection\n" \
|
||||
".subsection 1\n" \
|
||||
"663:\n\t" \
|
||||
newinstr "\n" \
|
||||
"664:\n\t" \
|
||||
".org . - (664b-663b) + (662b-661b)\n\t" \
|
||||
".org . - (662b-661b) + (664b-663b)\n\t" \
|
||||
".previous\n" \
|
||||
".endif\n"
|
||||
|
||||
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
|
||||
".if "__stringify(cfg_enabled)" == 1\n" \
|
||||
"661:\n\t" \
|
||||
oldinstr "\n" \
|
||||
"662:\n" \
|
||||
".pushsection .altinstructions,\"a\"\n" \
|
||||
ALTINSTR_ENTRY_CB(feature, cb) \
|
||||
".popsection\n" \
|
||||
"663:\n\t" \
|
||||
"664:\n\t" \
|
||||
".endif\n"
|
||||
|
||||
#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
|
||||
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
|
||||
|
||||
#define ALTERNATIVE_CB(oldinstr, cb) \
|
||||
__ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
|
||||
#else
|
||||
|
||||
#include <asm/assembler.h>
|
||||
|
||||
.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
|
||||
.word \orig_offset - .
|
||||
.word \alt_offset - .
|
||||
.hword \feature
|
||||
.byte \orig_len
|
||||
.byte \alt_len
|
||||
.endm
|
||||
|
||||
.macro alternative_insn insn1, insn2, cap, enable = 1
|
||||
.if \enable
|
||||
661: \insn1
|
||||
662: .pushsection .altinstructions, "a"
|
||||
altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
|
||||
.popsection
|
||||
.subsection 1
|
||||
663: \insn2
|
||||
664: .previous
|
||||
.org . - (664b-663b) + (662b-661b)
|
||||
.org . - (662b-661b) + (664b-663b)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Alternative sequences
|
||||
*
|
||||
* The code for the case where the capability is not present will be
|
||||
* assembled and linked as normal. There are no restrictions on this
|
||||
* code.
|
||||
*
|
||||
* The code for the case where the capability is present will be
|
||||
* assembled into a special section to be used for dynamic patching.
|
||||
* Code for that case must:
|
||||
*
|
||||
* 1. Be exactly the same length (in bytes) as the default code
|
||||
* sequence.
|
||||
*
|
||||
* 2. Not contain a branch target that is used outside of the
|
||||
* alternative sequence it is defined in (branches into an
|
||||
* alternative sequence are not fixed up).
|
||||
*/
|
||||
|
||||
/*
|
||||
* Begin an alternative code sequence.
|
||||
*/
|
||||
.macro alternative_if_not cap
|
||||
.set .Lasm_alt_mode, 0
|
||||
.pushsection .altinstructions, "a"
|
||||
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
|
||||
.popsection
|
||||
661:
|
||||
.endm
|
||||
|
||||
.macro alternative_if cap
|
||||
.set .Lasm_alt_mode, 1
|
||||
.pushsection .altinstructions, "a"
|
||||
altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
|
||||
.popsection
|
||||
.subsection 1
|
||||
.align 2 /* So GAS knows label 661 is suitably aligned */
|
||||
661:
|
||||
.endm
|
||||
|
||||
.macro alternative_cb cb
|
||||
.set .Lasm_alt_mode, 0
|
||||
.pushsection .altinstructions, "a"
|
||||
altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
|
||||
.popsection
|
||||
661:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Provide the other half of the alternative code sequence.
|
||||
*/
|
||||
.macro alternative_else
|
||||
662:
|
||||
.if .Lasm_alt_mode==0
|
||||
.subsection 1
|
||||
.else
|
||||
.previous
|
||||
.endif
|
||||
663:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Complete an alternative code sequence.
|
||||
*/
|
||||
.macro alternative_endif
|
||||
664:
|
||||
.if .Lasm_alt_mode==0
|
||||
.previous
|
||||
.endif
|
||||
.org . - (664b-663b) + (662b-661b)
|
||||
.org . - (662b-661b) + (664b-663b)
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Callback-based alternative epilogue
|
||||
*/
|
||||
.macro alternative_cb_end
|
||||
662:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Provides a trivial alternative or default sequence consisting solely
|
||||
* of NOPs. The number of NOPs is chosen automatically to match the
|
||||
* previous case.
|
||||
*/
|
||||
.macro alternative_else_nop_endif
|
||||
alternative_else
|
||||
nops (662b-661b) / AARCH64_INSN_SIZE
|
||||
alternative_endif
|
||||
.endm
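/*
 * Usage sketch (illustrative only; the register and capability are chosen
 * arbitrarily). Per the rules above, both branches must assemble to the
 * same length:
 *
 * alternative_if ARM64_HAS_PAN
 *	mov	x7, #1		// used once ARM64_HAS_PAN is detected
 * alternative_else
 *	mov	x7, #0		// default code, overwritten at patch time
 * alternative_endif
 */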
|
||||
|
||||
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
|
||||
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
|
||||
|
||||
.macro user_alt, label, oldinstr, newinstr, cond
|
||||
9999: alternative_insn "\oldinstr", "\newinstr", \cond
|
||||
_asm_extable 9999b, \label
|
||||
.endm
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
|
||||
*
|
||||
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
|
||||
* N.B. If CONFIG_FOO is specified, but not selected, the whole block
|
||||
* will be omitted, including oldinstr.
|
||||
*/
|
||||
#define ALTERNATIVE(oldinstr, newinstr, ...) \
|
||||
_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
|
||||
|
||||
#endif /* __ASM_ALTERNATIVE_MACROS_H */
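For reference, a minimal sketch of how C code consumes ALTERNATIVE(); the same
pattern appears in the uaccess changes further down, and the function name here
is hypothetical:

	static inline void example_set_pan(void)
	{
		/* "nop" is patched to SET_PSTATE_PAN(1) once ARM64_HAS_PAN is detected */
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN));
	}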
|
@ -2,17 +2,13 @@
|
||||
#ifndef __ASM_ALTERNATIVE_H
|
||||
#define __ASM_ALTERNATIVE_H
|
||||
|
||||
#include <asm/cpucaps.h>
|
||||
#include <asm/insn.h>
|
||||
|
||||
#define ARM64_CB_PATCH ARM64_NCAPS
|
||||
#include <asm/alternative-macros.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/stringify.h>
|
||||
|
||||
struct alt_instr {
|
||||
s32 orig_offset; /* offset to original instruction */
|
||||
@ -35,264 +31,5 @@ void apply_alternatives_module(void *start, size_t length);
|
||||
static inline void apply_alternatives_module(void *start, size_t length) { }
|
||||
#endif
|
||||
|
||||
#define ALTINSTR_ENTRY(feature) \
|
||||
" .word 661b - .\n" /* label */ \
|
||||
" .word 663f - .\n" /* new instruction */ \
|
||||
" .hword " __stringify(feature) "\n" /* feature bit */ \
|
||||
" .byte 662b-661b\n" /* source len */ \
|
||||
" .byte 664f-663f\n" /* replacement len */
|
||||
|
||||
#define ALTINSTR_ENTRY_CB(feature, cb) \
|
||||
" .word 661b - .\n" /* label */ \
|
||||
" .word " __stringify(cb) "- .\n" /* callback */ \
|
||||
" .hword " __stringify(feature) "\n" /* feature bit */ \
|
||||
" .byte 662b-661b\n" /* source len */ \
|
||||
" .byte 664f-663f\n" /* replacement len */
|
||||
|
||||
/*
|
||||
* alternative assembly primitive:
|
||||
*
|
||||
* If any of these .org directive fail, it means that insn1 and insn2
|
||||
* don't have the same length. This used to be written as
|
||||
*
|
||||
* .if ((664b-663b) != (662b-661b))
|
||||
* .error "Alternatives instruction length mismatch"
|
||||
* .endif
|
||||
*
|
||||
* but most assemblers die if insn1 or insn2 have a .inst. This should
|
||||
* be fixed in a binutils release posterior to 2.25.51.0.2 (anything
|
||||
* containing commit 4e4d08cf7399b606 or c1baaddf8861).
|
||||
*
|
||||
* Alternatives with callbacks do not generate replacement instructions.
|
||||
*/
|
||||
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
|
||||
".if "__stringify(cfg_enabled)" == 1\n" \
|
||||
"661:\n\t" \
|
||||
oldinstr "\n" \
|
||||
"662:\n" \
|
||||
".pushsection .altinstructions,\"a\"\n" \
|
||||
ALTINSTR_ENTRY(feature) \
|
||||
".popsection\n" \
|
||||
".subsection 1\n" \
|
||||
"663:\n\t" \
|
||||
newinstr "\n" \
|
||||
"664:\n\t" \
|
||||
".org . - (664b-663b) + (662b-661b)\n\t" \
|
||||
".org . - (662b-661b) + (664b-663b)\n\t" \
|
||||
".previous\n" \
|
||||
".endif\n"
|
||||
|
||||
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
|
||||
".if "__stringify(cfg_enabled)" == 1\n" \
|
||||
"661:\n\t" \
|
||||
oldinstr "\n" \
|
||||
"662:\n" \
|
||||
".pushsection .altinstructions,\"a\"\n" \
|
||||
ALTINSTR_ENTRY_CB(feature, cb) \
|
||||
".popsection\n" \
|
||||
"663:\n\t" \
|
||||
"664:\n\t" \
|
||||
".endif\n"
|
||||
|
||||
#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
|
||||
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
|
||||
|
||||
#define ALTERNATIVE_CB(oldinstr, cb) \
|
||||
__ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
|
||||
#else
|
||||
|
||||
#include <asm/assembler.h>
|
||||
|
||||
.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
|
||||
.word \orig_offset - .
|
||||
.word \alt_offset - .
|
||||
.hword \feature
|
||||
.byte \orig_len
|
||||
.byte \alt_len
|
||||
.endm
|
||||
|
||||
.macro alternative_insn insn1, insn2, cap, enable = 1
|
||||
.if \enable
|
||||
661: \insn1
|
||||
662: .pushsection .altinstructions, "a"
|
||||
altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
|
||||
.popsection
|
||||
.subsection 1
|
||||
663: \insn2
|
||||
664: .previous
|
||||
.org . - (664b-663b) + (662b-661b)
|
||||
.org . - (662b-661b) + (664b-663b)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Alternative sequences
|
||||
*
|
||||
* The code for the case where the capability is not present will be
|
||||
* assembled and linked as normal. There are no restrictions on this
|
||||
* code.
|
||||
*
|
||||
* The code for the case where the capability is present will be
|
||||
* assembled into a special section to be used for dynamic patching.
|
||||
* Code for that case must:
|
||||
*
|
||||
* 1. Be exactly the same length (in bytes) as the default code
|
||||
* sequence.
|
||||
*
|
||||
* 2. Not contain a branch target that is used outside of the
|
||||
* alternative sequence it is defined in (branches into an
|
||||
* alternative sequence are not fixed up).
|
||||
*/
|
||||
|
||||
/*
|
||||
* Begin an alternative code sequence.
|
||||
*/
|
||||
.macro alternative_if_not cap
|
||||
.set .Lasm_alt_mode, 0
|
||||
.pushsection .altinstructions, "a"
|
||||
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
|
||||
.popsection
|
||||
661:
|
||||
.endm
|
||||
|
||||
.macro alternative_if cap
|
||||
.set .Lasm_alt_mode, 1
|
||||
.pushsection .altinstructions, "a"
|
||||
altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
|
||||
.popsection
|
||||
.subsection 1
|
||||
.align 2 /* So GAS knows label 661 is suitably aligned */
|
||||
661:
|
||||
.endm
|
||||
|
||||
.macro alternative_cb cb
|
||||
.set .Lasm_alt_mode, 0
|
||||
.pushsection .altinstructions, "a"
|
||||
altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
|
||||
.popsection
|
||||
661:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Provide the other half of the alternative code sequence.
|
||||
*/
|
||||
.macro alternative_else
|
||||
662:
|
||||
.if .Lasm_alt_mode==0
|
||||
.subsection 1
|
||||
.else
|
||||
.previous
|
||||
.endif
|
||||
663:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Complete an alternative code sequence.
|
||||
*/
|
||||
.macro alternative_endif
|
||||
664:
|
||||
.if .Lasm_alt_mode==0
|
||||
.previous
|
||||
.endif
|
||||
.org . - (664b-663b) + (662b-661b)
|
||||
.org . - (662b-661b) + (664b-663b)
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Callback-based alternative epilogue
|
||||
*/
|
||||
.macro alternative_cb_end
|
||||
662:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Provides a trivial alternative or default sequence consisting solely
|
||||
* of NOPs. The number of NOPs is chosen automatically to match the
|
||||
* previous case.
|
||||
*/
|
||||
.macro alternative_else_nop_endif
|
||||
alternative_else
|
||||
nops (662b-661b) / AARCH64_INSN_SIZE
|
||||
alternative_endif
|
||||
.endm
|
||||
|
||||
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
|
||||
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
|
||||
|
||||
.macro user_alt, label, oldinstr, newinstr, cond
|
||||
9999: alternative_insn "\oldinstr", "\newinstr", \cond
|
||||
_asm_extable 9999b, \label
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Generate the assembly for UAO alternatives with exception table entries.
|
||||
* This is complicated as there is no post-increment or pair versions of the
|
||||
* unprivileged instructions, and USER() only works for single instructions.
|
||||
*/
|
||||
#ifdef CONFIG_ARM64_UAO
|
||||
.macro uao_ldp l, reg1, reg2, addr, post_inc
|
||||
alternative_if_not ARM64_HAS_UAO
|
||||
8888: ldp \reg1, \reg2, [\addr], \post_inc;
|
||||
8889: nop;
|
||||
nop;
|
||||
alternative_else
|
||||
ldtr \reg1, [\addr];
|
||||
ldtr \reg2, [\addr, #8];
|
||||
add \addr, \addr, \post_inc;
|
||||
alternative_endif
|
||||
|
||||
_asm_extable 8888b,\l;
|
||||
_asm_extable 8889b,\l;
|
||||
.endm
|
||||
|
||||
.macro uao_stp l, reg1, reg2, addr, post_inc
|
||||
alternative_if_not ARM64_HAS_UAO
|
||||
8888: stp \reg1, \reg2, [\addr], \post_inc;
|
||||
8889: nop;
|
||||
nop;
|
||||
alternative_else
|
||||
sttr \reg1, [\addr];
|
||||
sttr \reg2, [\addr, #8];
|
||||
add \addr, \addr, \post_inc;
|
||||
alternative_endif
|
||||
|
||||
_asm_extable 8888b,\l;
|
||||
_asm_extable 8889b,\l;
|
||||
.endm
|
||||
|
||||
.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
|
||||
alternative_if_not ARM64_HAS_UAO
|
||||
8888: \inst \reg, [\addr], \post_inc;
|
||||
nop;
|
||||
alternative_else
|
||||
\alt_inst \reg, [\addr];
|
||||
add \addr, \addr, \post_inc;
|
||||
alternative_endif
|
||||
|
||||
_asm_extable 8888b,\l;
|
||||
.endm
|
||||
#else
|
||||
.macro uao_ldp l, reg1, reg2, addr, post_inc
|
||||
USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
|
||||
.endm
|
||||
.macro uao_stp l, reg1, reg2, addr, post_inc
|
||||
USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
|
||||
.endm
|
||||
.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
|
||||
USER(\l, \inst \reg, [\addr], \post_inc)
|
||||
.endm
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
|
||||
*
|
||||
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
|
||||
* N.B. If CONFIG_FOO is specified, but not selected, the whole block
|
||||
* will be omitted, including oldinstr.
|
||||
*/
|
||||
#define ALTERNATIVE(oldinstr, newinstr, ...) \
|
||||
_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __ASM_ALTERNATIVE_H */
|
||||
|
@ -2,7 +2,7 @@
|
||||
#ifndef __ASM_ASM_UACCESS_H
|
||||
#define __ASM_ASM_UACCESS_H
|
||||
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/alternative-macros.h>
|
||||
#include <asm/kernel-pgtable.h>
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/sysreg.h>
|
||||
@ -58,4 +58,33 @@ alternative_else_nop_endif
|
||||
.endm
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Generate the assembly for LDTR/STTR with exception table entries.
|
||||
* This is complicated as there is no post-increment or pair versions of the
|
||||
* unprivileged instructions, and USER() only works for single instructions.
|
||||
*/
|
||||
.macro user_ldp l, reg1, reg2, addr, post_inc
|
||||
8888: ldtr \reg1, [\addr];
|
||||
8889: ldtr \reg2, [\addr, #8];
|
||||
add \addr, \addr, \post_inc;
|
||||
|
||||
_asm_extable 8888b,\l;
|
||||
_asm_extable 8889b,\l;
|
||||
.endm
|
||||
|
||||
.macro user_stp l, reg1, reg2, addr, post_inc
|
||||
8888: sttr \reg1, [\addr];
|
||||
8889: sttr \reg2, [\addr, #8];
|
||||
add \addr, \addr, \post_inc;
|
||||
|
||||
_asm_extable 8888b,\l;
|
||||
_asm_extable 8889b,\l;
|
||||
.endm
|
||||
|
||||
.macro user_ldst l, inst, reg, addr, post_inc
|
||||
8888: \inst \reg, [\addr];
|
||||
add \addr, \addr, \post_inc;
|
||||
|
||||
_asm_extable 8888b,\l;
|
||||
.endm
|
||||
#endif
|
||||
|
@ -16,8 +16,6 @@
|
||||
#define ARM64_WORKAROUND_CAVIUM_23154 6
|
||||
#define ARM64_WORKAROUND_834220 7
|
||||
#define ARM64_HAS_NO_HW_PREFETCH 8
|
||||
#define ARM64_HAS_UAO 9
|
||||
#define ARM64_ALT_PAN_NOT_UAO 10
|
||||
#define ARM64_HAS_VIRT_HOST_EXTN 11
|
||||
#define ARM64_WORKAROUND_CAVIUM_27456 12
|
||||
#define ARM64_HAS_32BIT_EL0 13
|
||||
@ -66,7 +64,8 @@
|
||||
#define ARM64_HAS_TLB_RANGE 56
|
||||
#define ARM64_MTE 57
|
||||
#define ARM64_WORKAROUND_1508412 58
|
||||
#define ARM64_HAS_LDAPR 59
|
||||
|
||||
#define ARM64_NCAPS 59
|
||||
#define ARM64_NCAPS 60
|
||||
|
||||
#endif /* __ASM_CPUCAPS_H */
|
||||
|
@ -667,10 +667,16 @@ static __always_inline bool system_supports_fpsimd(void)
|
||||
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
|
||||
}
|
||||
|
||||
static inline bool system_uses_hw_pan(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_ARM64_PAN) &&
|
||||
cpus_have_const_cap(ARM64_HAS_PAN);
|
||||
}
|
||||
|
||||
static inline bool system_uses_ttbr0_pan(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
|
||||
!cpus_have_const_cap(ARM64_HAS_PAN);
|
||||
!system_uses_hw_pan();
|
||||
}
|
||||
|
||||
static __always_inline bool system_supports_sve(void)
|
||||
@ -762,6 +768,13 @@ static inline bool cpu_has_hw_af(void)
|
||||
ID_AA64MMFR1_HADBS_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool cpu_has_pan(void)
|
||||
{
|
||||
u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
|
||||
return cpuid_feature_extract_unsigned_field(mmfr1,
|
||||
ID_AA64MMFR1_PAN_SHIFT);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM64_AMU_EXTN
|
||||
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
|
||||
extern bool cpu_has_amu_feat(int cpu);
|
||||
|
@ -10,6 +10,5 @@
|
||||
#include <linux/sched.h>
|
||||
|
||||
extern unsigned long arch_align_stack(unsigned long sp);
|
||||
void uao_thread_switch(struct task_struct *next);
|
||||
|
||||
#endif /* __ASM_EXEC_H */
|
||||
|
@ -16,7 +16,7 @@
|
||||
do { \
|
||||
unsigned int loops = FUTEX_MAX_LOOPS; \
|
||||
\
|
||||
uaccess_enable(); \
|
||||
uaccess_enable_privileged(); \
|
||||
asm volatile( \
|
||||
" prfm pstl1strm, %2\n" \
|
||||
"1: ldxr %w1, %2\n" \
|
||||
@ -39,7 +39,7 @@ do { \
|
||||
"+r" (loops) \
|
||||
: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
|
||||
: "memory"); \
|
||||
uaccess_disable(); \
|
||||
uaccess_disable_privileged(); \
|
||||
} while (0)
|
||||
|
||||
static inline int
|
||||
@ -95,7 +95,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
|
||||
return -EFAULT;
|
||||
|
||||
uaddr = __uaccess_mask_ptr(_uaddr);
|
||||
uaccess_enable();
|
||||
uaccess_enable_privileged();
|
||||
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldxr %w1, %2\n"
|
||||
@ -118,7 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
|
||||
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
|
||||
: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
|
||||
: "memory");
|
||||
uaccess_disable();
|
||||
uaccess_disable_privileged();
|
||||
|
||||
if (!ret)
|
||||
*uval = val;
|
||||
|
@ -10,8 +10,7 @@
|
||||
#include <linux/build_bug.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/* A64 instructions are always 32 bits. */
|
||||
#define AARCH64_INSN_SIZE 4
|
||||
#include <asm/alternative.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
/*
|
||||
|
@ -8,9 +8,6 @@
|
||||
#ifndef __ASM_PROCESSOR_H
|
||||
#define __ASM_PROCESSOR_H
|
||||
|
||||
#define KERNEL_DS UL(-1)
|
||||
#define USER_DS ((UL(1) << VA_BITS) - 1)
|
||||
|
||||
/*
|
||||
* On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
|
||||
* no point in shifting all network buffers by 2 bytes just to make some IP
|
||||
@ -48,6 +45,7 @@
|
||||
|
||||
#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN)
|
||||
#define TASK_SIZE_64 (UL(1) << vabits_actual)
|
||||
#define TASK_SIZE_MAX (UL(1) << VA_BITS)
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
|
||||
|
@ -16,6 +16,11 @@
|
||||
#define CurrentEL_EL1 (1 << 2)
|
||||
#define CurrentEL_EL2 (2 << 2)
|
||||
|
||||
#define INIT_PSTATE_EL1 \
|
||||
(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h)
|
||||
#define INIT_PSTATE_EL2 \
|
||||
(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)
|
||||
|
||||
/*
|
||||
* PMR values used to mask/unmask interrupts.
|
||||
*
|
||||
@ -188,8 +193,7 @@ struct pt_regs {
|
||||
s32 syscallno;
|
||||
u32 unused2;
|
||||
#endif
|
||||
|
||||
u64 orig_addr_limit;
|
||||
u64 sdei_ttbr1;
|
||||
/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
|
||||
u64 pmr_save;
|
||||
u64 stackframe[2];
|
||||
|
arch/arm64/include/asm/rwonce.h (new file, 73 lines)
@ -0,0 +1,73 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C) 2020 Google LLC.
|
||||
*/
|
||||
#ifndef __ASM_RWONCE_H
|
||||
#define __ASM_RWONCE_H
|
||||
|
||||
#ifdef CONFIG_LTO
|
||||
|
||||
#include <linux/compiler_types.h>
|
||||
#include <asm/alternative-macros.h>
|
||||
|
||||
#ifndef BUILD_VDSO
|
||||
|
||||
#ifdef CONFIG_AS_HAS_LDAPR
|
||||
#define __LOAD_RCPC(sfx, regs...) \
|
||||
ALTERNATIVE( \
|
||||
"ldar" #sfx "\t" #regs, \
|
||||
".arch_extension rcpc\n" \
|
||||
"ldapr" #sfx "\t" #regs, \
|
||||
ARM64_HAS_LDAPR)
|
||||
#else
|
||||
#define __LOAD_RCPC(sfx, regs...) "ldar" #sfx "\t" #regs
|
||||
#endif /* CONFIG_AS_HAS_LDAPR */
|
||||
|
||||
/*
|
||||
* When building with LTO, there is an increased risk of the compiler
|
||||
* converting an address dependency headed by a READ_ONCE() invocation
|
||||
* into a control dependency and consequently allowing for harmful
|
||||
* reordering by the CPU.
|
||||
*
|
||||
* Ensure that such transformations are harmless by overriding the generic
|
||||
* READ_ONCE() definition with one that provides RCpc acquire semantics
|
||||
* when building with LTO.
|
||||
*/
|
||||
#define __READ_ONCE(x) \
|
||||
({ \
|
||||
typeof(&(x)) __x = &(x); \
|
||||
int atomic = 1; \
|
||||
union { __unqual_scalar_typeof(*__x) __val; char __c[1]; } __u; \
|
||||
switch (sizeof(x)) { \
|
||||
case 1: \
|
||||
asm volatile(__LOAD_RCPC(b, %w0, %1) \
|
||||
: "=r" (*(__u8 *)__u.__c) \
|
||||
: "Q" (*__x) : "memory"); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm volatile(__LOAD_RCPC(h, %w0, %1) \
|
||||
: "=r" (*(__u16 *)__u.__c) \
|
||||
: "Q" (*__x) : "memory"); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm volatile(__LOAD_RCPC(, %w0, %1) \
|
||||
: "=r" (*(__u32 *)__u.__c) \
|
||||
: "Q" (*__x) : "memory"); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm volatile(__LOAD_RCPC(, %0, %1) \
|
||||
: "=r" (*(__u64 *)__u.__c) \
|
||||
: "Q" (*__x) : "memory"); \
|
||||
break; \
|
||||
default: \
|
||||
atomic = 0; \
|
||||
} \
|
||||
atomic ? (typeof(*__x))__u.__val : (*(volatile typeof(__x))__x);\
|
||||
})
|
||||
|
||||
#endif /* !BUILD_VDSO */
|
||||
#endif /* CONFIG_LTO */
|
||||
|
||||
#include <asm-generic/rwonce.h>
|
||||
|
||||
#endif /* __ASM_RWONCE_H */
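As an illustration (types and names hypothetical, not part of the patch): in an
LTO build, each READ_ONCE() below becomes an acquire load (LDAR by default,
LDAPR once ARM64_HAS_LDAPR is patched in), so the second load stays ordered
after the first even if the compiler rewrites the address dependency as a
control dependency:

	struct node *n = READ_ONCE(head);
	int v = READ_ONCE(n->val);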
|
@ -98,6 +98,10 @@
|
||||
#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
|
||||
#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift))
|
||||
|
||||
#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x))
|
||||
#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x))
|
||||
#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
|
||||
|
||||
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
|
||||
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
|
||||
|
||||
@ -579,6 +583,9 @@
|
||||
#define ENDIAN_SET_EL2 0
|
||||
#endif
|
||||
|
||||
#define INIT_SCTLR_EL2_MMU_OFF \
|
||||
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
|
||||
|
||||
/* SCTLR_EL1 specific flags. */
|
||||
#define SCTLR_EL1_ATA0 (BIT(42))
|
||||
|
||||
@ -612,12 +619,15 @@
|
||||
#define ENDIAN_SET_EL1 0
|
||||
#endif
|
||||
|
||||
#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
|
||||
SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
|
||||
SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
|
||||
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
|
||||
SCTLR_ELx_ITFSB| SCTLR_ELx_ATA | SCTLR_EL1_ATA0 |\
|
||||
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
|
||||
#define INIT_SCTLR_EL1_MMU_OFF \
|
||||
(ENDIAN_SET_EL1 | SCTLR_EL1_RES1)
|
||||
|
||||
#define INIT_SCTLR_EL1_MMU_ON \
|
||||
(SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
|
||||
SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
|
||||
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
|
||||
SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
|
||||
SCTLR_EL1_RES1)
|
||||
|
||||
/* MAIR_ELx memory attributes (used by Linux) */
|
||||
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
|
||||
|
@ -18,14 +18,11 @@ struct task_struct;
|
||||
#include <asm/stack_pointer.h>
|
||||
#include <asm/types.h>
|
||||
|
||||
typedef unsigned long mm_segment_t;
|
||||
|
||||
/*
|
||||
* low level task data that entry.S needs immediate access to.
|
||||
*/
|
||||
struct thread_info {
|
||||
unsigned long flags; /* low level flags */
|
||||
mm_segment_t addr_limit; /* address limit */
|
||||
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
|
||||
u64 ttbr0; /* saved TTBR0_EL1 */
|
||||
#endif
|
||||
@ -66,8 +63,7 @@ void arch_release_task_struct(struct task_struct *tsk);
|
||||
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
|
||||
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
|
||||
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
|
||||
#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
|
||||
#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */
|
||||
#define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
|
||||
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
|
||||
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
|
||||
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
|
||||
@ -93,7 +89,6 @@ void arch_release_task_struct(struct task_struct *tsk);
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
|
||||
#define _TIF_UPROBE (1 << TIF_UPROBE)
|
||||
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
|
||||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_32BIT (1 << TIF_32BIT)
|
||||
#define _TIF_SVE (1 << TIF_SVE)
|
||||
@ -101,7 +96,7 @@ void arch_release_task_struct(struct task_struct *tsk);
|
||||
|
||||
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
|
||||
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
|
||||
_TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT)
|
||||
_TIF_UPROBE | _TIF_MTE_ASYNC_FAULT)
|
||||
|
||||
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
|
||||
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
|
||||
@ -119,7 +114,6 @@ void arch_release_task_struct(struct task_struct *tsk);
|
||||
{ \
|
||||
.flags = _TIF_FOREIGN_FPSTATE, \
|
||||
.preempt_count = INIT_PREEMPT_COUNT, \
|
||||
.addr_limit = KERNEL_DS, \
|
||||
INIT_SCS \
|
||||
}
|
||||
|
||||
|
@ -24,44 +24,18 @@
|
||||
#include <asm/memory.h>
|
||||
#include <asm/extable.h>
|
||||
|
||||
#define get_fs() (current_thread_info()->addr_limit)
|
||||
|
||||
static inline void set_fs(mm_segment_t fs)
|
||||
{
|
||||
current_thread_info()->addr_limit = fs;
|
||||
|
||||
/*
|
||||
* Prevent a mispredicted conditional call to set_fs from forwarding
|
||||
* the wrong address limit to access_ok under speculation.
|
||||
*/
|
||||
spec_bar();
|
||||
|
||||
/* On user-mode return, check fs is correct */
|
||||
set_thread_flag(TIF_FSCHECK);
|
||||
|
||||
/*
|
||||
* Enable/disable UAO so that copy_to_user() etc can access
|
||||
* kernel memory with the unprivileged instructions.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
|
||||
else
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
|
||||
CONFIG_ARM64_UAO));
|
||||
}
|
||||
|
||||
#define uaccess_kernel() (get_fs() == KERNEL_DS)
|
||||
#define HAVE_GET_KERNEL_NOFAULT
|
||||
|
||||
/*
|
||||
* Test whether a block of memory is a valid user space address.
|
||||
* Returns 1 if the range is valid, 0 otherwise.
|
||||
*
|
||||
* This is equivalent to the following test:
|
||||
* (u65)addr + (u65)size <= (u65)current->addr_limit + 1
|
||||
* (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
|
||||
*/
|
||||
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
|
||||
{
|
||||
unsigned long ret, limit = current_thread_info()->addr_limit;
|
||||
unsigned long ret, limit = TASK_SIZE_MAX - 1;
|
||||
|
||||
/*
|
||||
* Asynchronous I/O running in a kernel thread does not have the
|
||||
@ -94,7 +68,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
|
||||
}
|
||||
|
||||
#define access_ok(addr, size) __range_ok(addr, size)
|
||||
#define user_addr_max get_fs
|
||||
|
||||
#define _ASM_EXTABLE(from, to) \
|
||||
" .pushsection __ex_table, \"a\"\n" \
|
||||
@ -186,47 +159,26 @@ static inline void __uaccess_enable_hw_pan(void)
|
||||
CONFIG_ARM64_PAN));
|
||||
}
|
||||
|
||||
#define __uaccess_disable(alt) \
|
||||
do { \
|
||||
if (!uaccess_ttbr0_disable()) \
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
|
||||
CONFIG_ARM64_PAN)); \
|
||||
} while (0)
|
||||
|
||||
#define __uaccess_enable(alt) \
|
||||
do { \
|
||||
if (!uaccess_ttbr0_enable()) \
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
|
||||
CONFIG_ARM64_PAN)); \
|
||||
} while (0)
|
||||
|
||||
static inline void uaccess_disable(void)
|
||||
static inline void uaccess_disable_privileged(void)
|
||||
{
|
||||
__uaccess_disable(ARM64_HAS_PAN);
|
||||
if (uaccess_ttbr0_disable())
|
||||
return;
|
||||
|
||||
__uaccess_enable_hw_pan();
|
||||
}
|
||||
|
||||
static inline void uaccess_enable(void)
|
||||
static inline void uaccess_enable_privileged(void)
|
||||
{
|
||||
__uaccess_enable(ARM64_HAS_PAN);
|
||||
if (uaccess_ttbr0_enable())
|
||||
return;
|
||||
|
||||
__uaccess_disable_hw_pan();
|
||||
}
|
||||
|
||||
/*
|
||||
* These functions are no-ops when UAO is present.
|
||||
*/
|
||||
static inline void uaccess_disable_not_uao(void)
|
||||
{
|
||||
__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
|
||||
}
|
||||
|
||||
static inline void uaccess_enable_not_uao(void)
|
||||
{
|
||||
__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sanitise a uaccess pointer such that it becomes NULL if above the
|
||||
* current addr_limit. In case the pointer is tagged (has the top byte set),
|
||||
* untag the pointer before checking.
|
||||
* Sanitise a uaccess pointer such that it becomes NULL if above the maximum
|
||||
* user address. In case the pointer is tagged (has the top byte set), untag
|
||||
* the pointer before checking.
|
||||
*/
|
||||
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
|
||||
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
|
||||
@ -237,7 +189,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
|
||||
" bics xzr, %3, %2\n"
|
||||
" csel %0, %1, xzr, eq\n"
|
||||
: "=&r" (safe_ptr)
|
||||
: "r" (ptr), "r" (current_thread_info()->addr_limit),
|
||||
: "r" (ptr), "r" (TASK_SIZE_MAX - 1),
|
||||
"r" (untagged_addr(ptr))
|
||||
: "cc");
|
||||
|
||||
@ -253,10 +205,9 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
|
||||
* The "__xxx_error" versions set the third argument to -EFAULT if an error
|
||||
* occurs, and leave it unchanged on success.
|
||||
*/
|
||||
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
|
||||
#define __get_mem_asm(load, reg, x, addr, err) \
|
||||
asm volatile( \
|
||||
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
|
||||
alt_instr " " reg "1, [%2]\n", feature) \
|
||||
"1: " load " " reg "1, [%2]\n" \
|
||||
"2:\n" \
|
||||
" .section .fixup, \"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
@ -268,35 +219,36 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
|
||||
: "+r" (err), "=&r" (x) \
|
||||
: "r" (addr), "i" (-EFAULT))
|
||||
|
||||
#define __raw_get_user(x, ptr, err) \
|
||||
#define __raw_get_mem(ldr, x, ptr, err) \
|
||||
do { \
|
||||
unsigned long __gu_val; \
|
||||
__chk_user_ptr(ptr); \
|
||||
uaccess_enable_not_uao(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: \
|
||||
__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 2: \
|
||||
__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 4: \
|
||||
__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 8: \
|
||||
__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG(); \
|
||||
} \
|
||||
uaccess_disable_not_uao(); \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
} while (0)
|
||||
|
||||
#define __raw_get_user(x, ptr, err) \
|
||||
do { \
|
||||
__chk_user_ptr(ptr); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__raw_get_mem("ldtr", x, ptr, err); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
} while (0)
|
||||
|
||||
#define __get_user_error(x, ptr, err) \
|
||||
do { \
|
||||
__typeof__(*(ptr)) __user *__p = (ptr); \
|
||||
@ -318,10 +270,19 @@ do { \
|
||||
|
||||
#define get_user __get_user
|
||||
|
||||
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
|
||||
#define __get_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
int __gkn_err = 0; \
|
||||
\
|
||||
__raw_get_mem("ldr", *((type *)(dst)), \
|
||||
(__force type *)(src), __gkn_err); \
|
||||
if (unlikely(__gkn_err)) \
|
||||
goto err_label; \
|
||||
} while (0)
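/*
 * Usage sketch, assuming the generic get_kernel_nofault() wrapper from
 * <linux/uaccess.h> rather than anything introduced here; 'kaddr' is a
 * hypothetical kernel pointer:
 *
 *	long val;
 *
 *	if (get_kernel_nofault(val, (long *)kaddr))
 *		return -EFAULT;
 *
 * The plain LDR used by __get_kernel_nofault() needs no
 * uaccess_ttbr0_enable() window, since the source is a kernel address.
 */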
|
||||
|
||||
#define __put_mem_asm(store, reg, x, addr, err) \
|
||||
asm volatile( \
|
||||
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
|
||||
alt_instr " " reg "1, [%2]\n", feature) \
|
||||
"1: " store " " reg "1, [%2]\n" \
|
||||
"2:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
@ -332,32 +293,33 @@ do { \
|
||||
: "+r" (err) \
|
||||
: "r" (x), "r" (addr), "i" (-EFAULT))
|
||||
|
||||
#define __raw_put_user(x, ptr, err) \
|
||||
#define __raw_put_mem(str, x, ptr, err) \
|
||||
do { \
|
||||
__typeof__(*(ptr)) __pu_val = (x); \
|
||||
__chk_user_ptr(ptr); \
|
||||
uaccess_enable_not_uao(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: \
|
||||
__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 2: \
|
||||
__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 4: \
|
||||
__put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 8: \
|
||||
__put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG(); \
|
||||
} \
|
||||
uaccess_disable_not_uao(); \
|
||||
} while (0)
|
||||
|
||||
#define __raw_put_user(x, ptr, err) \
|
||||
do { \
|
||||
__chk_user_ptr(ptr); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__raw_put_mem("sttr", x, ptr, err); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
} while (0)
|
||||
|
||||
#define __put_user_error(x, ptr, err) \
|
||||
@ -381,14 +343,24 @@ do { \
|
||||
|
||||
#define put_user __put_user
|
||||
|
||||
#define __put_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
int __pkn_err = 0; \
|
||||
\
|
||||
__raw_put_mem("str", *((type *)(src)), \
|
||||
(__force type *)(dst), __pkn_err); \
|
||||
if (unlikely(__pkn_err)) \
|
||||
goto err_label; \
|
||||
} while(0)
|
||||
|
||||
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
|
||||
#define raw_copy_from_user(to, from, n) \
|
||||
({ \
|
||||
unsigned long __acfu_ret; \
|
||||
uaccess_enable_not_uao(); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__acfu_ret = __arch_copy_from_user((to), \
|
||||
__uaccess_mask_ptr(from), (n)); \
|
||||
uaccess_disable_not_uao(); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
__acfu_ret; \
|
||||
})
|
||||
|
||||
@ -396,10 +368,10 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
|
||||
#define raw_copy_to_user(to, from, n) \
|
||||
({ \
|
||||
unsigned long __actu_ret; \
|
||||
uaccess_enable_not_uao(); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \
|
||||
(from), (n)); \
|
||||
uaccess_disable_not_uao(); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
__actu_ret; \
|
||||
})
|
||||
|
||||
@ -407,10 +379,10 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
|
||||
#define raw_copy_in_user(to, from, n) \
|
||||
({ \
|
||||
unsigned long __aciu_ret; \
|
||||
uaccess_enable_not_uao(); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \
|
||||
__uaccess_mask_ptr(from), (n)); \
|
||||
uaccess_disable_not_uao(); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
__aciu_ret; \
|
||||
})
|
||||
|
||||
@ -421,9 +393,9 @@ extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned lo
|
||||
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
|
||||
{
|
||||
if (access_ok(to, n)) {
|
||||
uaccess_enable_not_uao();
|
||||
uaccess_ttbr0_enable();
|
||||
n = __arch_clear_user(__uaccess_mask_ptr(to), n);
|
||||
uaccess_disable_not_uao();
|
||||
uaccess_ttbr0_disable();
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
@ -21,7 +21,8 @@
|
||||
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
|
||||
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
|
||||
|
||||
static int all_alternatives_applied;
|
||||
/* Volatile, as we may be patching the guts of READ_ONCE() */
|
||||
static volatile int all_alternatives_applied;
|
||||
|
||||
static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
|
||||
|
||||
@ -205,7 +206,7 @@ static int __apply_alternatives_multi_stop(void *unused)
|
||||
|
||||
/* We always have a CPU 0 at this point (__init) */
|
||||
if (smp_processor_id()) {
|
||||
while (!READ_ONCE(all_alternatives_applied))
|
||||
while (!all_alternatives_applied)
|
||||
cpu_relax();
|
||||
isb();
|
||||
} else {
|
||||
@ -217,7 +218,7 @@ static int __apply_alternatives_multi_stop(void *unused)
|
||||
BUG_ON(all_alternatives_applied);
|
||||
__apply_alternatives(&region, false, remaining_capabilities);
|
||||
/* Barriers provided by the cache flushing */
|
||||
WRITE_ONCE(all_alternatives_applied, 1);
|
||||
all_alternatives_applied = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -277,7 +277,7 @@ static void __init register_insn_emulation_sysctl(void)
|
||||
|
||||
#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
|
||||
do { \
|
||||
uaccess_enable(); \
|
||||
uaccess_enable_privileged(); \
|
||||
__asm__ __volatile__( \
|
||||
" mov %w3, %w7\n" \
|
||||
"0: ldxr"B" %w2, [%4]\n" \
|
||||
@ -302,7 +302,7 @@ do { \
|
||||
"i" (-EFAULT), \
|
||||
"i" (__SWP_LL_SC_LOOPS) \
|
||||
: "memory"); \
|
||||
uaccess_disable(); \
|
||||
uaccess_disable_privileged(); \
|
||||
} while (0)
|
||||
|
||||
#define __user_swp_asm(data, addr, res, temp, temp2) \
|
||||
|
@ -30,7 +30,6 @@ int main(void)
|
||||
BLANK();
|
||||
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
|
||||
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
|
||||
DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
|
||||
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
|
||||
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
|
||||
#endif
|
||||
@ -70,7 +69,7 @@ int main(void)
|
||||
DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
|
||||
DEFINE(S_PC, offsetof(struct pt_regs, pc));
|
||||
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
|
||||
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
|
||||
DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
|
||||
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
|
||||
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
|
||||
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
|
||||
|
@ -153,10 +153,6 @@ EXPORT_SYMBOL(cpu_hwcap_keys);
|
||||
.width = 0, \
|
||||
}
|
||||
|
||||
/* meta feature for alternatives */
|
||||
static bool __maybe_unused
|
||||
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
|
||||
|
||||
static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
|
||||
|
||||
static bool __system_matches_cap(unsigned int n);
|
||||
@ -1598,7 +1594,7 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
|
||||
WARN_ON_ONCE(in_interrupt());
|
||||
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
|
||||
asm(SET_PSTATE_PAN(1));
|
||||
set_pstate_pan(1);
|
||||
}
|
||||
#endif /* CONFIG_ARM64_PAN */
|
||||
|
||||
@ -1768,28 +1764,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
|
||||
.matches = has_no_hw_prefetch,
|
||||
},
|
||||
#ifdef CONFIG_ARM64_UAO
|
||||
{
|
||||
.desc = "User Access Override",
|
||||
.capability = ARM64_HAS_UAO,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.matches = has_cpuid_feature,
|
||||
.sys_reg = SYS_ID_AA64MMFR2_EL1,
|
||||
.field_pos = ID_AA64MMFR2_UAO_SHIFT,
|
||||
.min_field_value = 1,
|
||||
/*
|
||||
* We rely on stop_machine() calling uao_thread_switch() to set
|
||||
* UAO immediately after patching.
|
||||
*/
|
||||
},
|
||||
#endif /* CONFIG_ARM64_UAO */
|
||||
#ifdef CONFIG_ARM64_PAN
|
||||
{
|
||||
.capability = ARM64_ALT_PAN_NOT_UAO,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.matches = cpufeature_pan_not_uao,
|
||||
},
|
||||
#endif /* CONFIG_ARM64_PAN */
|
||||
#ifdef CONFIG_ARM64_VHE
|
||||
{
|
||||
.desc = "Virtualization Host Extensions",
|
||||
@ -2136,6 +2110,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
.cpu_enable = cpu_enable_mte,
|
||||
},
|
||||
#endif /* CONFIG_ARM64_MTE */
|
||||
{
|
||||
.desc = "RCpc load-acquire (LDAPR)",
|
||||
.capability = ARM64_HAS_LDAPR,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.sys_reg = SYS_ID_AA64ISAR1_EL1,
|
||||
.sign = FTR_UNSIGNED,
|
||||
.field_pos = ID_AA64ISAR1_LRCPC_SHIFT,
|
||||
.matches = has_cpuid_feature,
|
||||
.min_field_value = 1,
|
||||
},
|
||||
{},
|
||||
};
|
||||
|
||||
@ -2650,7 +2634,7 @@ bool this_cpu_has_cap(unsigned int n)
|
||||
* - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
|
||||
* In all other cases cpus_have_{const_}cap() should be used.
|
||||
*/
|
||||
static bool __system_matches_cap(unsigned int n)
|
||||
static bool __maybe_unused __system_matches_cap(unsigned int n)
|
||||
{
|
||||
if (n < ARM64_NCAPS) {
|
||||
const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
|
||||
@ -2730,12 +2714,6 @@ void __init setup_cpu_features(void)
|
||||
ARCH_DMA_MINALIGN);
|
||||
}
|
||||
|
||||
static bool __maybe_unused
|
||||
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
|
||||
{
|
||||
return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
|
||||
}
|
||||
|
||||
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
|
||||
{
|
||||
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
|
||||
|
@ -216,12 +216,6 @@ alternative_else_nop_endif
|
||||
.else
|
||||
add x21, sp, #S_FRAME_SIZE
|
||||
get_current_task tsk
|
||||
/* Save the task's original addr_limit and set USER_DS */
|
||||
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
|
||||
str x20, [sp, #S_ORIG_ADDR_LIMIT]
|
||||
mov x20, #USER_DS
|
||||
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
|
||||
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
|
||||
.endif /* \el == 0 */
|
||||
mrs x22, elr_el1
|
||||
mrs x23, spsr_el1
|
||||
@ -279,12 +273,6 @@ alternative_else_nop_endif
|
||||
.macro kernel_exit, el
|
||||
.if \el != 0
|
||||
disable_daif
|
||||
|
||||
/* Restore the task's original addr_limit. */
|
||||
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
|
||||
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
|
||||
|
||||
/* No need to restore UAO, it will be restored from SPSR_EL1 */
|
||||
.endif
|
||||
|
||||
/* Restore pmr */
|
||||
@ -999,10 +987,9 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
|
||||
mov x4, xzr
|
||||
|
||||
/*
|
||||
* Use reg->interrupted_regs.addr_limit to remember whether to unmap
|
||||
* the kernel on exit.
|
||||
* Remember whether to unmap the kernel on exit.
|
||||
*/
|
||||
1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
|
||||
1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
|
||||
|
||||
#ifdef CONFIG_RANDOMIZE_BASE
|
||||
adr x4, tramp_vectors + PAGE_SIZE
|
||||
@ -1023,7 +1010,7 @@ NOKPROBE(__sdei_asm_entry_trampoline)
|
||||
* x4: struct sdei_registered_event argument from registration time.
|
||||
*/
|
||||
SYM_CODE_START(__sdei_asm_exit_trampoline)
|
||||
ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
|
||||
ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
|
||||
cbnz x4, 1f
|
||||
|
||||
tramp_unmap_kernel tmp=x4
|
||||
|
@ -104,7 +104,7 @@ pe_header:
|
||||
*/
|
||||
SYM_CODE_START(primary_entry)
|
||||
bl preserve_boot_args
|
||||
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
|
||||
bl init_kernel_el // w0=cpu_boot_mode
|
||||
adrp x23, __PHYS_OFFSET
|
||||
and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0
|
||||
bl set_cpu_boot_mode_flag
|
||||
@ -482,24 +482,33 @@ EXPORT_SYMBOL(kimage_vaddr)
|
||||
.section ".idmap.text","awx"
|
||||
|
||||
/*
|
||||
* If we're fortunate enough to boot at EL2, ensure that the world is
|
||||
* sane before dropping to EL1.
|
||||
* Starting from EL2 or EL1, configure the CPU to execute at the highest
|
||||
* reachable EL supported by the kernel in a chosen default state. If dropping
|
||||
* from EL2 to EL1, configure EL2 before configuring EL1.
|
||||
*
|
||||
* Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
|
||||
* SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
|
||||
*
|
||||
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
|
||||
* booted in EL1 or EL2 respectively.
|
||||
*/
|
||||
SYM_FUNC_START(el2_setup)
|
||||
msr SPsel, #1 // We want to use SP_EL{1,2}
|
||||
SYM_FUNC_START(init_kernel_el)
|
||||
mrs x0, CurrentEL
|
||||
cmp x0, #CurrentEL_EL2
|
||||
b.eq 1f
|
||||
mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
|
||||
msr sctlr_el1, x0
|
||||
mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
|
||||
isb
|
||||
ret
|
||||
b.eq init_el2
|
||||
|
||||
1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
|
||||
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
|
||||
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
|
||||
msr sctlr_el1, x0
|
||||
isb
|
||||
mov_q x0, INIT_PSTATE_EL1
|
||||
msr spsr_el1, x0
|
||||
msr elr_el1, lr
|
||||
mov w0, #BOOT_CPU_MODE_EL1
|
||||
eret
|
||||
|
||||
SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
|
||||
mov_q x0, INIT_SCTLR_EL2_MMU_OFF
|
||||
msr sctlr_el2, x0
|
||||
|
||||
#ifdef CONFIG_ARM64_VHE
|
||||
@ -608,9 +617,12 @@ set_hcr:
|
||||
|
||||
cbz x2, install_el2_stub
|
||||
|
||||
mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
|
||||
isb
|
||||
ret
|
||||
mov_q x0, INIT_PSTATE_EL2
|
||||
msr spsr_el2, x0
|
||||
msr elr_el2, lr
|
||||
mov w0, #BOOT_CPU_MODE_EL2
|
||||
eret
|
||||
|
||||
SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
|
||||
/*
|
||||
@ -620,7 +632,7 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
|
||||
* requires no configuration, and all non-hyp-specific EL2 setup
|
||||
* will be done via the _EL1 system register aliases in __cpu_setup.
|
||||
*/
|
||||
mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
|
||||
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
|
||||
msr sctlr_el1, x0
|
||||
|
||||
/* Coprocessor traps. */
|
||||
@ -642,14 +654,13 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
|
||||
7: adr_l x0, __hyp_stub_vectors
|
||||
msr vbar_el2, x0
|
||||
|
||||
/* spsr */
|
||||
mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
|
||||
PSR_MODE_EL1h)
|
||||
isb
|
||||
mov x0, #INIT_PSTATE_EL1
|
||||
msr spsr_el2, x0
|
||||
msr elr_el2, lr
|
||||
mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
|
||||
mov w0, #BOOT_CPU_MODE_EL2
|
||||
eret
|
||||
SYM_FUNC_END(el2_setup)
|
||||
SYM_FUNC_END(init_kernel_el)
|
||||
|
||||
/*
|
||||
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
|
||||
@ -699,7 +710,7 @@ SYM_DATA_END(__early_cpu_boot_status)
|
||||
* cores are held until we're ready for them to initialise.
|
||||
*/
|
||||
SYM_FUNC_START(secondary_holding_pen)
|
||||
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
|
||||
bl init_kernel_el // w0=cpu_boot_mode
|
||||
bl set_cpu_boot_mode_flag
|
||||
mrs x0, mpidr_el1
|
||||
mov_q x1, MPIDR_HWID_BITMASK
|
||||
@ -717,7 +728,7 @@ SYM_FUNC_END(secondary_holding_pen)
|
||||
* be used where CPUs are brought online dynamically by the kernel.
|
||||
*/
|
||||
SYM_FUNC_START(secondary_entry)
|
||||
bl el2_setup // Drop to EL1
|
||||
bl init_kernel_el // w0=cpu_boot_mode
|
||||
bl set_cpu_boot_mode_flag
|
||||
b secondary_startup
|
||||
SYM_FUNC_END(secondary_entry)
|
||||
|
@ -422,16 +422,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
p->thread.uw.tp_value = tls;
|
||||
} else {
|
||||
/*
|
||||
* A kthread has no context to ERET to, so ensure any buggy
|
||||
* ERET is treated as an illegal exception return.
|
||||
*
|
||||
* When a user task is created from a kthread, childregs will
|
||||
* be initialized by start_thread() or start_compat_thread().
|
||||
*/
|
||||
memset(childregs, 0, sizeof(struct pt_regs));
|
||||
childregs->pstate = PSR_MODE_EL1h;
|
||||
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
|
||||
cpus_have_const_cap(ARM64_HAS_UAO))
|
||||
childregs->pstate |= PSR_UAO_BIT;
|
||||
|
||||
spectre_v4_enable_task_mitigation(p);
|
||||
|
||||
if (system_uses_irq_prio_masking())
|
||||
childregs->pmr_save = GIC_PRIO_IRQON;
|
||||
childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
|
||||
|
||||
p->thread.cpu_context.x19 = stack_start;
|
||||
p->thread.cpu_context.x20 = stk_sz;
|
||||
@ -461,17 +460,6 @@ static void tls_thread_switch(struct task_struct *next)
|
||||
write_sysreg(*task_user_tls(next), tpidr_el0);
|
||||
}
|
||||
|
||||
/* Restore the UAO state depending on next's addr_limit */
|
||||
void uao_thread_switch(struct task_struct *next)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_ARM64_UAO)) {
|
||||
if (task_thread_info(next)->addr_limit == KERNEL_DS)
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
|
||||
else
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Force SSBS state on context-switch, since it may be lost after migrating
|
||||
* from a CPU which treats the bit as RES0 in a heterogeneous system.
|
||||
@ -555,7 +543,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
|
||||
hw_breakpoint_thread_switch(next);
|
||||
contextidr_thread_switch(next);
|
||||
entry_task_switch(next);
|
||||
uao_thread_switch(next);
|
||||
ssbs_thread_switch(next);
|
||||
erratum_1418040_thread_switch(prev, next);
|
||||
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include <linux/prctl.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
|
||||
#include <asm/insn.h>
|
||||
#include <asm/spectre.h>
|
||||
#include <asm/traps.h>
|
||||
|
||||
@ -537,12 +538,12 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
|
||||
|
||||
if (spectre_v4_mitigations_off()) {
|
||||
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
|
||||
asm volatile(SET_PSTATE_SSBS(1));
|
||||
set_pstate_ssbs(1);
|
||||
return SPECTRE_VULNERABLE;
|
||||
}
|
||||
|
||||
/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
|
||||
asm volatile(SET_PSTATE_SSBS(0));
|
||||
set_pstate_ssbs(0);
|
||||
return SPECTRE_MITIGATED;
|
||||
}
|
||||
|
||||
|
@ -178,12 +178,6 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
|
||||
sdei_api_event_context(i, &regs->regs[i]);
|
||||
}
|
||||
|
||||
/*
|
||||
* We didn't take an exception to get here, set PAN. UAO will be cleared
|
||||
* by sdei_event_handler()s force_uaccess_begin() call.
|
||||
*/
|
||||
__uaccess_enable_hw_pan();
|
||||
|
||||
err = sdei_event_handler(regs, arg);
|
||||
if (err)
|
||||
return SDEI_EV_FAILED;
|
||||
@ -222,12 +216,39 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
|
||||
return vbar + 0x480;
|
||||
}
|
||||
|
||||
static void __kprobes notrace __sdei_pstate_entry(void)
|
||||
{
|
||||
/*
|
||||
* The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
|
||||
* whether PSTATE bits are inherited unchanged or generated from
|
||||
* scratch, and the TF-A implementation always clears PAN and always
|
||||
* clears UAO. There are no other known implementations.
|
||||
*
|
||||
* Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
|
||||
* PSTATE is modified upon architectural exceptions, and so PAN is
|
||||
* either inherited or set per SCTLR_ELx.SPAN, and UAO is always
|
||||
* cleared.
|
||||
*
|
||||
* We must explicitly reset PAN to the expected state, including
|
||||
* clearing it when the host isn't using it, in case a VM had it set.
|
||||
*/
|
||||
if (system_uses_hw_pan())
|
||||
set_pstate_pan(1);
|
||||
else if (cpu_has_pan())
|
||||
set_pstate_pan(0);
|
||||
}
|
||||
|
||||
asmlinkage __kprobes notrace unsigned long
|
||||
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
/*
|
||||
* We didn't take an exception to get here, so the HW hasn't
|
||||
* set/cleared bits in PSTATE that we may rely on. Initialize PAN.
|
||||
*/
|
||||
__sdei_pstate_entry();
|
||||
|
||||
nmi_enter();
|
||||
|
||||
ret = _sdei_handler(regs, arg);
|
||||
|
@ -922,9 +922,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
trace_hardirqs_off();

do {
/* Check valid user FS if needed */
addr_limit_user_check();

if (thread_flags & _TIF_NEED_RESCHED) {
/* Unmask Debug and SError for the next task */
local_daif_restore(DAIF_PROCCTX_NOIRQ);
@ -99,7 +99,7 @@ SYM_FUNC_END(__cpu_suspend_enter)

.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
bl init_kernel_el
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir
@ -58,7 +58,6 @@ void notrace __cpu_suspend_exit(void)
* features that might not have been set correctly.
*/
__uaccess_enable_hw_pan();
uao_thread_switch(current);

/*
* Restore HW breakpoint registers to sane values
@ -28,7 +28,7 @@ ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
$(btildflags-y) -T

ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
ccflags-y += -DDISABLE_BRANCH_PROFILING
ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO

CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
KASAN_SANITIZE := n
@ -48,7 +48,7 @@ cc32-as-instr = $(call try-run,\
# As a result we set our own flags here.

# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
VDSO_CPPFLAGS := -DBUILD_VDSO -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
VDSO_CPPFLAGS += $(LINUXINCLUDE)

# Common C and assembly flags
@ -201,7 +201,7 @@ SECTIONS
INIT_CALLS
CON_INITCALL
INIT_RAM_FS
*(.init.rodata.* .init.bss) /* from the EFI stub */
*(.init.altinstructions .init.rodata.* .init.bss) /* from the EFI stub */
}
.exit.data : {
EXIT_DATA
@ -24,20 +24,20 @@ SYM_FUNC_START(__arch_clear_user)
subs x1, x1, #8
b.mi 2f
1:
uao_user_alternative 9f, str, sttr, xzr, x0, 8
user_ldst 9f, sttr, xzr, x0, 8
subs x1, x1, #8
b.pl 1b
2: adds x1, x1, #4
b.mi 3f
uao_user_alternative 9f, str, sttr, wzr, x0, 4
user_ldst 9f, sttr, wzr, x0, 4
sub x1, x1, #4
3: adds x1, x1, #2
b.mi 4f
uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
user_ldst 9f, sttrh, wzr, x0, 2
sub x1, x1, #2
4: adds x1, x1, #1
b.mi 5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
user_ldst 9f, sttrb, wzr, x0, 0
5: mov x0, #0
ret
SYM_FUNC_END(__arch_clear_user)
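Every uao_user_alternative site in these assembly routines collapses to user_ldst with the unprivileged (LDTR*/STTR*) mnemonic: with UAO gone there is no longer a runtime choice between the privileged and unprivileged encodings. As a rough C-level analogue (a hypothetical helper, purely illustrative and not part of this diff), a single user store now always uses the unprivileged form and relies on the exception-table fixup that the macro emits:

/* Illustration only: unconditional unprivileged store to a user address. */
static inline void __user_store_byte(unsigned char val, void __user *addr)
{
	/* STTRB faults if EL0 would not be allowed to write this address. */
	asm volatile("sttrb %w0, [%1]" : : "r" (val), "r" (addr) : "memory");
}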
@ -21,7 +21,7 @@
*/

.macro ldrb1 reg, ptr, val
uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val
user_ldst 9998f, ldtrb, \reg, \ptr, \val
.endm

.macro strb1 reg, ptr, val
@ -29,7 +29,7 @@
.endm

.macro ldrh1 reg, ptr, val
uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val
user_ldst 9998f, ldtrh, \reg, \ptr, \val
.endm

.macro strh1 reg, ptr, val
@ -37,7 +37,7 @@
.endm

.macro ldr1 reg, ptr, val
uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val
user_ldst 9998f, ldtr, \reg, \ptr, \val
.endm

.macro str1 reg, ptr, val
@ -45,7 +45,7 @@
.endm

.macro ldp1 reg1, reg2, ptr, val
uao_ldp 9998f, \reg1, \reg2, \ptr, \val
user_ldp 9998f, \reg1, \reg2, \ptr, \val
.endm

.macro stp1 reg1, reg2, ptr, val
@ -22,35 +22,35 @@
* x0 - bytes not copied
*/
.macro ldrb1 reg, ptr, val
uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val
user_ldst 9998f, ldtrb, \reg, \ptr, \val
.endm

.macro strb1 reg, ptr, val
uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val
user_ldst 9998f, sttrb, \reg, \ptr, \val
.endm

.macro ldrh1 reg, ptr, val
uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val
user_ldst 9998f, ldtrh, \reg, \ptr, \val
.endm

.macro strh1 reg, ptr, val
uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val
user_ldst 9998f, sttrh, \reg, \ptr, \val
.endm

.macro ldr1 reg, ptr, val
uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val
user_ldst 9998f, ldtr, \reg, \ptr, \val
.endm

.macro str1 reg, ptr, val
uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val
user_ldst 9998f, sttr, \reg, \ptr, \val
.endm

.macro ldp1 reg1, reg2, ptr, val
uao_ldp 9998f, \reg1, \reg2, \ptr, \val
user_ldp 9998f, \reg1, \reg2, \ptr, \val
.endm

.macro stp1 reg1, reg2, ptr, val
uao_stp 9998f, \reg1, \reg2, \ptr, \val
user_stp 9998f, \reg1, \reg2, \ptr, \val
.endm

end .req x5
@ -24,7 +24,7 @@
.endm

.macro strb1 reg, ptr, val
uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val
user_ldst 9998f, sttrb, \reg, \ptr, \val
.endm

.macro ldrh1 reg, ptr, val
@ -32,7 +32,7 @@
.endm

.macro strh1 reg, ptr, val
uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val
user_ldst 9998f, sttrh, \reg, \ptr, \val
.endm

.macro ldr1 reg, ptr, val
@ -40,7 +40,7 @@
.endm

.macro str1 reg, ptr, val
uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val
user_ldst 9998f, sttr, \reg, \ptr, \val
.endm

.macro ldp1 reg1, reg2, ptr, val
@ -48,7 +48,7 @@
.endm

.macro stp1 reg1, reg2, ptr, val
uao_stp 9998f, \reg1, \reg2, \ptr, \val
user_stp 9998f, \reg1, \reg2, \ptr, \val
.endm

end .req x5
@ -4,7 +4,7 @@
*/
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-uaccess.h>
#include <asm/assembler.h>
#include <asm/mte.h>
#include <asm/page.h>
@ -67,7 +67,7 @@ SYM_FUNC_START(mte_copy_tags_from_user)
mov x3, x1
cbz x2, 2f
1:
uao_user_alternative 2f, ldrb, ldtrb, w4, x1, 0
user_ldst 2f, ldtrb, w4, x1, 0
lsl x4, x4, #MTE_TAG_SHIFT
stg x4, [x0], #MTE_GRANULE_SIZE
add x1, x1, #1
@ -94,7 +94,7 @@ SYM_FUNC_START(mte_copy_tags_to_user)
1:
ldg x4, [x1]
ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
uao_user_alternative 2f, strb, sttrb, w4, x0, 0
user_ldst 2f, sttrb, w4, x0, 0
add x0, x0, #1
add x1, x1, #MTE_GRANULE_SIZE
subs x2, x2, #1
@ -30,9 +30,7 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
{
unsigned long rc;

uaccess_enable_not_uao();
rc = __arch_copy_from_user(to, from, n);
uaccess_disable_not_uao();
rc = raw_copy_from_user(to, from, n);

/* See above */
__clean_dcache_area_pop(to, n - rc);
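The __copy_user_flushcache() hunk drops the explicit uaccess_enable_not_uao()/uaccess_disable_not_uao() pair and calls raw_copy_from_user() instead, on the assumption that the generic helper now brackets the arch copy with its own uaccess enable/disable. A sketch of that shape (the helper names below are illustrative, not quoted from the tree):

/* Sketch: what raw_copy_from_user() is assumed to do around the arch copy. */
static inline unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
	unsigned long rc;

	uaccess_enable();			/* e.g. clear PSTATE.PAN */
	rc = __arch_copy_from_user(to, from, n);
	uaccess_disable();			/* restore PSTATE.PAN */
	return rc;				/* bytes not copied, as before */
}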
@ -479,11 +479,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
}

if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die_kernel_fault("access to user memory with fs=KERNEL_DS",
addr, esr, regs);

if (is_el1_instruction_abort(esr))
die_kernel_fault("execution of user memory",
addr, esr, regs);
@ -489,6 +489,6 @@ SYM_FUNC_START(__cpu_setup)
/*
* Prepare SCTLR
*/
mov_q x0, SCTLR_EL1_SET
mov_q x0, INIT_SCTLR_EL1_MMU_ON
ret // return to head.S
SYM_FUNC_END(__cpu_setup)
@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/*
* The call to use to reach the firmware.
|
||||
struct sdei_registered_event *arg)
|
||||
{
|
||||
int err;
|
||||
mm_segment_t orig_addr_limit;
|
||||
u32 event_num = arg->event_num;
|
||||
|
||||
/*
|
||||
* Save restore 'fs'.
|
||||
* The architecture's entry code save/restores 'fs' when taking an
|
||||
* exception from the kernel. This ensures addr_limit isn't inherited
|
||||
* if you interrupted something that allowed the uaccess routines to
|
||||
* access kernel memory.
|
||||
* Do the same here because this doesn't come via the same entry code.
|
||||
*/
|
||||
orig_addr_limit = force_uaccess_begin();
|
||||
|
||||
err = arg->callback(event_num, regs, arg->callback_arg);
|
||||
if (err)
|
||||
pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
|
||||
event_num, smp_processor_id(), err);
|
||||
|
||||
force_uaccess_end(orig_addr_limit);
|
||||
|
||||
return err;
|
||||
}
|
||||
NOKPROBE_SYMBOL(sdei_event_handler);
|
||||
|
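With addr_limit gone there is nothing to save or restore around the callback, so sdei_event_handler() reduces to the dispatch and error-reporting lines already visible above. A reconstruction of the simplified function, assembled from the context lines in this hunk rather than quoted from the resulting file:

int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	u32 event_num = arg->event_num;

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);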