From 2a2848e7c2fde1c26ff46998ac10f7bf9ca2de04 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 8 Apr 2022 09:40:09 +0530 Subject: [PATCH 001/145] arm64/mm: Compute PTRS_PER_[PMD|PUD] independently of PTRS_PER_PTE Possible page table entries (or pointers) on non-zero page table levels are dependent on a single page size i.e PAGE_SIZE and size required for each individual page table entry i.e 8 bytes. PTRS_PER_[PMD|PUD] as such are not related to PTRS_PER_PTE in any manner, as being implied currently. So lets just make this very explicit and compute these macros independently. Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220408041009.1259701-1-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pgtable-hwdef.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 66671ff05183..dd3d12bce07b 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -49,7 +49,7 @@ #define PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2) #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PTRS_PER_PMD PTRS_PER_PTE +#define PTRS_PER_PMD (1 << (PAGE_SHIFT - 3)) #endif /* @@ -59,7 +59,7 @@ #define PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1) #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) -#define PTRS_PER_PUD PTRS_PER_PTE +#define PTRS_PER_PUD (1 << (PAGE_SHIFT - 3)) #endif /* From 4f6277e8ac397e5932d9af5c6b3dadefe81d53dc Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 13 Apr 2022 15:59:05 +0100 Subject: [PATCH 002/145] arm64: stacktrace: remove NULL task check from unwind_frame() Currently, there is a check for a NULL task in unwind_frame(). It is not needed since all current callers pass a non-NULL task. There should be no functional change as a result of this patch. Signed-off-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh for the series. Link: https://lore.kernel.org/r/20220413145910.3060139-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/stacktrace.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index e4103e085681..94932ade5c79 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -70,9 +70,6 @@ static int notrace unwind_frame(struct task_struct *tsk, unsigned long fp = frame->fp; struct stack_info info; - if (!tsk) - tsk = current; - /* Final frame; nothing to unwind */ if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; From cb86a41b35c8f5da93ee7370c11634b57509d22b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 13 Apr 2022 15:59:06 +0100 Subject: [PATCH 003/145] arm64: stacktrace: delete PCS comment The comment at the top of stacktrace.c isn't all that helpful, as it's not associated with the code which inspects the frame record, and the code example isn't representative of common code generation today. Delete it. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Madhavan T. Venkataraman Cc: Mark Brown Cc: Will Deacon Reviewed-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Reviewed-by: Kalesh Singh for the series. 
Link: https://lore.kernel.org/r/20220413145910.3060139-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/stacktrace.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 94932ade5c79..08af9ca9a845 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -18,21 +18,6 @@ #include #include -/* - * AArch64 PCS assigns the frame pointer to x29. - * - * A simple function prologue looks like this: - * sub sp, sp, #0x10 - * stp x29, x30, [sp] - * mov x29, sp - * - * A simple function epilogue looks like this: - * mov sp, x29 - * ldp x29, x30, [sp] - * add sp, sp, #0x10 - */ - - static notrace void start_backtrace(struct stackframe *frame, unsigned long fp, unsigned long pc) { From 96bb1530c4f9039996bf95d28dcc957861558696 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 13 Apr 2022 15:59:07 +0100 Subject: [PATCH 004/145] arm64: stacktrace: make struct stackframe private to stacktrace.c Now that arm64 uses arch_stack_walk() consistently, struct stackframe is only used within stacktrace.c. To make it easier to read and maintain this code, it would be nicer if the definition were there too. Move the definition into stacktrace.c. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Madhavan T. Venkataraman Cc: Mark Brown Cc: Will Deacon Reviewed-by: Madhavan T. Venkataraman Reviwed-by: Mark Brown Reviewed-by: Kalesh Singh for the series. Link: https://lore.kernel.org/r/20220413145910.3060139-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace.h | 32 ----------------------------- arch/arm64/kernel/stacktrace.c | 32 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index e77cdef9ca29..aec9315bf156 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -31,38 +31,6 @@ struct stack_info { enum stack_type type; }; -/* - * A snapshot of a frame record or fp/lr register values, along with some - * accounting information necessary for robust unwinding. - * - * @fp: The fp value in the frame record (or the real fp) - * @pc: The lr value in the frame record (or the real lr) - * - * @stacks_done: Stacks which have been entirely unwound, for which it is no - * longer valid to unwind to. - * - * @prev_fp: The fp that pointed to this frame record, or a synthetic value - * of 0. This is used to ensure that within a stack, each - * subsequent frame record is at an increasing address. - * @prev_type: The type of stack this frame record was on, or a synthetic - * value of STACK_TYPE_UNKNOWN. This is used to detect a - * transition from one stack to another. - * - * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance - * associated with the most recently encountered replacement lr - * value. 
- */ -struct stackframe { - unsigned long fp; - unsigned long pc; - DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); - unsigned long prev_fp; - enum stack_type prev_type; -#ifdef CONFIG_KRETPROBES - struct llist_node *kr_cur; -#endif -}; - extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, const char *loglvl); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 08af9ca9a845..073d0941a5b6 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -18,6 +18,38 @@ #include #include +/* + * A snapshot of a frame record or fp/lr register values, along with some + * accounting information necessary for robust unwinding. + * + * @fp: The fp value in the frame record (or the real fp) + * @pc: The lr value in the frame record (or the real lr) + * + * @stacks_done: Stacks which have been entirely unwound, for which it is no + * longer valid to unwind to. + * + * @prev_fp: The fp that pointed to this frame record, or a synthetic value + * of 0. This is used to ensure that within a stack, each + * subsequent frame record is at an increasing address. + * @prev_type: The type of stack this frame record was on, or a synthetic + * value of STACK_TYPE_UNKNOWN. This is used to detect a + * transition from one stack to another. + * + * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance + * associated with the most recently encountered replacement lr + * value. + */ +struct stackframe { + unsigned long fp; + unsigned long pc; + DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); + unsigned long prev_fp; + enum stack_type prev_type; +#ifdef CONFIG_KRETPROBES + struct llist_node *kr_cur; +#endif +}; + static notrace void start_backtrace(struct stackframe *frame, unsigned long fp, unsigned long pc) { From c797bd45480b41d1d988b0a74d8d5a7c615bd674 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 13 Apr 2022 15:59:08 +0100 Subject: [PATCH 005/145] arm64: stacktrace: rename unwinder functions Rename unwinder functions for consistency and better naming. - Rename start_backtrace() to unwind_init(). - Rename unwind_frame() to unwind_next(). - Rename walk_stackframe() to unwind(). There should be no functional change as a result of this patch. Signed-off-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh for the series. Link: https://lore.kernel.org/r/20220413145910.3060139-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/stacktrace.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 073d0941a5b6..d65fde99b74a 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -50,8 +50,8 @@ struct stackframe { #endif }; -static notrace void start_backtrace(struct stackframe *frame, unsigned long fp, - unsigned long pc) +static notrace void unwind_init(struct stackframe *frame, unsigned long fp, + unsigned long pc) { frame->fp = fp; frame->pc = pc; @@ -62,7 +62,7 @@ static notrace void start_backtrace(struct stackframe *frame, unsigned long fp, /* * Prime the first unwind. * - * In unwind_frame() we'll check that the FP points to a valid stack, + * In unwind_next() we'll check that the FP points to a valid stack, * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be * treated as a transition to whichever stack that happens to be. 
The * prev_fp value won't be used, but we set it to 0 such that it is @@ -72,7 +72,7 @@ static notrace void start_backtrace(struct stackframe *frame, unsigned long fp, frame->prev_fp = 0; frame->prev_type = STACK_TYPE_UNKNOWN; } -NOKPROBE_SYMBOL(start_backtrace); +NOKPROBE_SYMBOL(unwind_init); /* * Unwind from one frame record (A) to the next frame record (B). @@ -81,8 +81,8 @@ NOKPROBE_SYMBOL(start_backtrace); * records (e.g. a cycle), determined based on the location and fp value of A * and the location (but not the fp value) of B. */ -static int notrace unwind_frame(struct task_struct *tsk, - struct stackframe *frame) +static int notrace unwind_next(struct task_struct *tsk, + struct stackframe *frame) { unsigned long fp = frame->fp; struct stack_info info; @@ -122,7 +122,7 @@ static int notrace unwind_frame(struct task_struct *tsk, /* * Record this frame record's values and location. The prev_fp and - * prev_type are only meaningful to the next unwind_frame() invocation. + * prev_type are only meaningful to the next unwind_next() invocation. */ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); @@ -155,23 +155,23 @@ static int notrace unwind_frame(struct task_struct *tsk, return 0; } -NOKPROBE_SYMBOL(unwind_frame); +NOKPROBE_SYMBOL(unwind_next); -static void notrace walk_stackframe(struct task_struct *tsk, - struct stackframe *frame, - bool (*fn)(void *, unsigned long), void *data) +static void notrace unwind(struct task_struct *tsk, + struct stackframe *frame, + bool (*fn)(void *, unsigned long), void *data) { while (1) { int ret; if (!fn(data, frame->pc)) break; - ret = unwind_frame(tsk, frame); + ret = unwind_next(tsk, frame); if (ret < 0) break; } } -NOKPROBE_SYMBOL(walk_stackframe); +NOKPROBE_SYMBOL(unwind); static bool dump_backtrace_entry(void *arg, unsigned long where) { @@ -213,14 +213,14 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry, struct stackframe frame; if (regs) - start_backtrace(&frame, regs->regs[29], regs->pc); + unwind_init(&frame, regs->regs[29], regs->pc); else if (task == current) - start_backtrace(&frame, + unwind_init(&frame, (unsigned long)__builtin_frame_address(1), (unsigned long)__builtin_return_address(0)); else - start_backtrace(&frame, thread_saved_fp(task), + unwind_init(&frame, thread_saved_fp(task), thread_saved_pc(task)); - walk_stackframe(task, &frame, consume_entry, cookie); + unwind(task, &frame, consume_entry, cookie); } From e9d75a0ba87851187fe52493f1527229a7e101b3 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 13 Apr 2022 15:59:09 +0100 Subject: [PATCH 006/145] arm64: stacktrace: rename stackframe to unwind_state Rename "struct stackframe" to "struct unwind_state" for consistency and better naming. Accordingly, rename variable/argument "frame" to "state". There should be no functional change as a result of this patch. Signed-off-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh for the series. 
Link: https://lore.kernel.org/r/20220413145910.3060139-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/stacktrace.c | 68 +++++++++++++++++----------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index d65fde99b74a..d5a195748aff 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -39,7 +39,7 @@ * associated with the most recently encountered replacement lr * value. */ -struct stackframe { +struct unwind_state { unsigned long fp; unsigned long pc; DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); @@ -50,13 +50,13 @@ struct stackframe { #endif }; -static notrace void unwind_init(struct stackframe *frame, unsigned long fp, +static notrace void unwind_init(struct unwind_state *state, unsigned long fp, unsigned long pc) { - frame->fp = fp; - frame->pc = pc; + state->fp = fp; + state->pc = pc; #ifdef CONFIG_KRETPROBES - frame->kr_cur = NULL; + state->kr_cur = NULL; #endif /* @@ -68,9 +68,9 @@ static notrace void unwind_init(struct stackframe *frame, unsigned long fp, * prev_fp value won't be used, but we set it to 0 such that it is * definitely not an accessible stack address. */ - bitmap_zero(frame->stacks_done, __NR_STACK_TYPES); - frame->prev_fp = 0; - frame->prev_type = STACK_TYPE_UNKNOWN; + bitmap_zero(state->stacks_done, __NR_STACK_TYPES); + state->prev_fp = 0; + state->prev_type = STACK_TYPE_UNKNOWN; } NOKPROBE_SYMBOL(unwind_init); @@ -82,9 +82,9 @@ NOKPROBE_SYMBOL(unwind_init); * and the location (but not the fp value) of B. */ static int notrace unwind_next(struct task_struct *tsk, - struct stackframe *frame) + struct unwind_state *state) { - unsigned long fp = frame->fp; + unsigned long fp = state->fp; struct stack_info info; /* Final frame; nothing to unwind */ @@ -97,7 +97,7 @@ static int notrace unwind_next(struct task_struct *tsk, if (!on_accessible_stack(tsk, fp, 16, &info)) return -EINVAL; - if (test_bit(info.type, frame->stacks_done)) + if (test_bit(info.type, state->stacks_done)) return -EINVAL; /* @@ -113,27 +113,27 @@ static int notrace unwind_next(struct task_struct *tsk, * stack to another, it's never valid to unwind back to that first * stack. */ - if (info.type == frame->prev_type) { - if (fp <= frame->prev_fp) + if (info.type == state->prev_type) { + if (fp <= state->prev_fp) return -EINVAL; } else { - set_bit(frame->prev_type, frame->stacks_done); + set_bit(state->prev_type, state->stacks_done); } /* * Record this frame record's values and location. The prev_fp and * prev_type are only meaningful to the next unwind_next() invocation. */ - frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); - frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); - frame->prev_fp = fp; - frame->prev_type = info.type; + state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); + state->prev_fp = fp; + state->prev_type = info.type; - frame->pc = ptrauth_strip_insn_pac(frame->pc); + state->pc = ptrauth_strip_insn_pac(state->pc); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk->ret_stack && - (frame->pc == (unsigned long)return_to_handler)) { + (state->pc == (unsigned long)return_to_handler)) { unsigned long orig_pc; /* * This is a case where function graph tracer has @@ -141,16 +141,16 @@ static int notrace unwind_next(struct task_struct *tsk, * to hook a function return. * So replace it to an original value. 
*/ - orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc, - (void *)frame->fp); - if (WARN_ON_ONCE(frame->pc == orig_pc)) + orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc, + (void *)state->fp); + if (WARN_ON_ONCE(state->pc == orig_pc)) return -EINVAL; - frame->pc = orig_pc; + state->pc = orig_pc; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_KRETPROBES - if (is_kretprobe_trampoline(frame->pc)) - frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur); + if (is_kretprobe_trampoline(state->pc)) + state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur); #endif return 0; @@ -158,15 +158,15 @@ static int notrace unwind_next(struct task_struct *tsk, NOKPROBE_SYMBOL(unwind_next); static void notrace unwind(struct task_struct *tsk, - struct stackframe *frame, + struct unwind_state *state, bool (*fn)(void *, unsigned long), void *data) { while (1) { int ret; - if (!fn(data, frame->pc)) + if (!fn(data, state->pc)) break; - ret = unwind_next(tsk, frame); + ret = unwind_next(tsk, state); if (ret < 0) break; } @@ -210,17 +210,17 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) { - struct stackframe frame; + struct unwind_state state; if (regs) - unwind_init(&frame, regs->regs[29], regs->pc); + unwind_init(&state, regs->regs[29], regs->pc); else if (task == current) - unwind_init(&frame, + unwind_init(&state, (unsigned long)__builtin_frame_address(1), (unsigned long)__builtin_return_address(0)); else - unwind_init(&frame, thread_saved_fp(task), + unwind_init(&state, thread_saved_fp(task), thread_saved_pc(task)); - unwind(task, &frame, consume_entry, cookie); + unwind(task, &state, consume_entry, cookie); } From bd5552bc4807a87a6597c629204712c7df7284f4 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 13 Apr 2022 15:59:10 +0100 Subject: [PATCH 007/145] arm64: stacktrace: align with common naming For historical reasons, the naming of parameters and their types in the arm64 stacktrace code differs from that used in generic code and other architectures, even though the types are equivalent. For consistency and clarity, use the generic names. There should be no functional change as a result of this patch. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Mark Rutland Reviewed-by: Mark Brown Reviewed-by: Kalesh Singh for the series. Link: https://lore.kernel.org/r/20220413145910.3060139-7-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/stacktrace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index d5a195748aff..0467cb79f080 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -159,12 +159,12 @@ NOKPROBE_SYMBOL(unwind_next); static void notrace unwind(struct task_struct *tsk, struct unwind_state *state, - bool (*fn)(void *, unsigned long), void *data) + stack_trace_consume_fn consume_entry, void *cookie) { while (1) { int ret; - if (!fn(data, state->pc)) + if (!consume_entry(cookie, state->pc)) break; ret = unwind_next(tsk, state); if (ret < 0) From 96d32e630935c1636b0236c88779e81eff120e0a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:12 +0100 Subject: [PATCH 008/145] arm64/sme: Provide ABI documentation for SME Provide ABI documentation for SME similar to that for SVE. 
Due to the very large overlap around streaming SVE mode in both implementation and interfaces documentation for streaming mode SVE is added to the SVE document rather than the SME one. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Reviewed-by: Szabolcs Nagy Link: https://lore.kernel.org/r/20220419112247.711548-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- Documentation/arm64/index.rst | 1 + Documentation/arm64/sme.rst | 428 ++++++++++++++++++++++++++++++++++ Documentation/arm64/sve.rst | 70 +++++- 3 files changed, 489 insertions(+), 10 deletions(-) create mode 100644 Documentation/arm64/sme.rst diff --git a/Documentation/arm64/index.rst b/Documentation/arm64/index.rst index 4f840bac083e..ae21f8118830 100644 --- a/Documentation/arm64/index.rst +++ b/Documentation/arm64/index.rst @@ -21,6 +21,7 @@ ARM64 Architecture perf pointer-authentication silicon-errata + sme sve tagged-address-abi tagged-pointers diff --git a/Documentation/arm64/sme.rst b/Documentation/arm64/sme.rst new file mode 100644 index 000000000000..8ba677b87e90 --- /dev/null +++ b/Documentation/arm64/sme.rst @@ -0,0 +1,428 @@ +=================================================== +Scalable Matrix Extension support for AArch64 Linux +=================================================== + +This document outlines briefly the interface provided to userspace by Linux in +order to support use of the ARM Scalable Matrix Extension (SME). + +This is an outline of the most important features and issues only and not +intended to be exhaustive. It should be read in conjunction with the SVE +documentation in sve.rst which provides details on the Streaming SVE mode +included in SME. + +This document does not aim to describe the SME architecture or programmer's +model. To aid understanding, a minimal description of relevant programmer's +model features for SME is included in Appendix A. + + +1. General +----------- + +* PSTATE.SM, PSTATE.ZA, the streaming mode vector length, the ZA + register state and TPIDR2_EL0 are tracked per thread. + +* The presence of SME is reported to userspace via HWCAP2_SME in the aux vector + AT_HWCAP2 entry. Presence of this flag implies the presence of the SME + instructions and registers, and the Linux-specific system interfaces + described in this document. SME is reported in /proc/cpuinfo as "sme". + +* Support for the execution of SME instructions in userspace can also be + detected by reading the CPU ID register ID_AA64PFR1_EL1 using an MRS + instruction, and checking that the value of the SME field is nonzero. [3] + + It does not guarantee the presence of the system interfaces described in the + following sections: software that needs to verify that those interfaces are + present must check for HWCAP2_SME instead. + +* There are a number of optional SME features, presence of these is reported + through AT_HWCAP2 through: + + HWCAP2_SME_I16I64 + HWCAP2_SME_F64F64 + HWCAP2_SME_I8I32 + HWCAP2_SME_F16F32 + HWCAP2_SME_B16F32 + HWCAP2_SME_F32F32 + HWCAP2_SME_FA64 + + This list may be extended over time as the SME architecture evolves. + + These extensions are also reported via the CPU ID register ID_AA64SMFR0_EL1, + which userspace can read using an MRS instruction. See elf_hwcaps.txt and + cpu-feature-registers.txt for details. + +* Debuggers should restrict themselves to interacting with the target via the + NT_ARM_SVE, NT_ARM_SSVE and NT_ARM_ZA regsets. 
The recommended way + of detecting support for these regsets is to connect to a target process + first and then attempt a + + ptrace(PTRACE_GETREGSET, pid, NT_ARM_, &iov). + +* Whenever ZA register values are exchanged in memory between userspace and + the kernel, the register value is encoded in memory as a series of horizontal + vectors from 0 to VL/8-1 stored in the same endianness invariant format as is + used for SVE vectors. + +* On thread creation TPIDR2_EL0 is preserved unless CLONE_SETTLS is specified, + in which case it is set to 0. + +2. Vector lengths +------------------ + +SME defines a second vector length similar to the SVE vector length which is +controls the size of the streaming mode SVE vectors and the ZA matrix array. +The ZA matrix is square with each side having as many bytes as a streaming +mode SVE vector. + + +3. Sharing of streaming and non-streaming mode SVE state +--------------------------------------------------------- + +It is implementation defined which if any parts of the SVE state are shared +between streaming and non-streaming modes. When switching between modes +via software interfaces such as ptrace if no register content is provided as +part of switching no state will be assumed to be shared and everything will +be zeroed. + + +4. System call behaviour +------------------------- + +* On syscall PSTATE.ZA is preserved, if PSTATE.ZA==1 then the contents of the + ZA matrix are preserved. + +* On syscall PSTATE.SM will be cleared and the SVE registers will be handled + as per the standard SVE ABI. + +* Neither the SVE registers nor ZA are used to pass arguments to or receive + results from any syscall. + +* On process creation (eg, clone()) the newly created process will have + PSTATE.SM cleared. + +* All other SME state of a thread, including the currently configured vector + length, the state of the PR_SME_VL_INHERIT flag, and the deferred vector + length (if any), is preserved across all syscalls, subject to the specific + exceptions for execve() described in section 6. + + +5. Signal handling +------------------- + +* Signal handlers are invoked with streaming mode and ZA disabled. + +* A new signal frame record za_context encodes the ZA register contents on + signal delivery. [1] + +* The signal frame record for ZA always contains basic metadata, in particular + the thread's vector length (in za_context.vl). + +* The ZA matrix may or may not be included in the record, depending on + the value of PSTATE.ZA. The registers are present if and only if: + za_context.head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl)) + in which case PSTATE.ZA == 1. + +* If matrix data is present, the remainder of the record has a vl-dependent + size and layout. Macros ZA_SIG_* are defined [1] to facilitate access to + them. + +* The matrix is stored as a series of horizontal vectors in the same format as + is used for SVE vectors. + +* If the ZA context is too big to fit in sigcontext.__reserved[], then extra + space is allocated on the stack, an extra_context record is written in + __reserved[] referencing this space. za_context is then written in the + extra space. Refer to [1] for further details about this mechanism. + + +5. Signal return +----------------- + +When returning from a signal handler: + +* If there is no za_context record in the signal frame, or if the record is + present but contains no register data as described in the previous section, + then ZA is disabled. 
+ +* If za_context is present in the signal frame and contains matrix data then + PSTATE.ZA is set to 1 and ZA is populated with the specified data. + +* The vector length cannot be changed via signal return. If za_context.vl in + the signal frame does not match the current vector length, the signal return + attempt is treated as illegal, resulting in a forced SIGSEGV. + + +6. prctl extensions +-------------------- + +Some new prctl() calls are added to allow programs to manage the SME vector +length: + +prctl(PR_SME_SET_VL, unsigned long arg) + + Sets the vector length of the calling thread and related flags, where + arg == vl | flags. Other threads of the calling process are unaffected. + + vl is the desired vector length, where sve_vl_valid(vl) must be true. + + flags: + + PR_SME_VL_INHERIT + + Inherit the current vector length across execve(). Otherwise, the + vector length is reset to the system default at execve(). (See + Section 9.) + + PR_SME_SET_VL_ONEXEC + + Defer the requested vector length change until the next execve() + performed by this thread. + + The effect is equivalent to implicit execution of the following + call immediately after the next execve() (if any) by the thread: + + prctl(PR_SME_SET_VL, arg & ~PR_SME_SET_VL_ONEXEC) + + This allows launching of a new program with a different vector + length, while avoiding runtime side effects in the caller. + + Without PR_SME_SET_VL_ONEXEC, the requested change takes effect + immediately. + + + Return value: a nonnegative on success, or a negative value on error: + EINVAL: SME not supported, invalid vector length requested, or + invalid flags. + + + On success: + + * Either the calling thread's vector length or the deferred vector length + to be applied at the next execve() by the thread (dependent on whether + PR_SME_SET_VL_ONEXEC is present in arg), is set to the largest value + supported by the system that is less than or equal to vl. If vl == + SVE_VL_MAX, the value set will be the largest value supported by the + system. + + * Any previously outstanding deferred vector length change in the calling + thread is cancelled. + + * The returned value describes the resulting configuration, encoded as for + PR_SME_GET_VL. The vector length reported in this value is the new + current vector length for this thread if PR_SME_SET_VL_ONEXEC was not + present in arg; otherwise, the reported vector length is the deferred + vector length that will be applied at the next execve() by the calling + thread. + + * Changing the vector length causes all of ZA, P0..P15, FFR and all bits of + Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become + unspecified, including both streaming and non-streaming SVE state. + Calling PR_SME_SET_VL with vl equal to the thread's current vector + length, or calling PR_SME_SET_VL with the PR_SVE_SET_VL_ONEXEC flag, + does not constitute a change to the vector length for this purpose. + + * Changing the vector length causes PSTATE.ZA and PSTATE.SM to be cleared. + Calling PR_SME_SET_VL with vl equal to the thread's current vector + length, or calling PR_SME_SET_VL with the PR_SVE_SET_VL_ONEXEC flag, + does not constitute a change to the vector length for this purpose. + + +prctl(PR_SME_GET_VL) + + Gets the vector length of the calling thread. + + The following flag may be OR-ed into the result: + + PR_SME_VL_INHERIT + + Vector length will be inherited across execve(). 
+ + There is no way to determine whether there is an outstanding deferred + vector length change (which would only normally be the case between a + fork() or vfork() and the corresponding execve() in typical use). + + To extract the vector length from the result, bitwise and it with + PR_SME_VL_LEN_MASK. + + Return value: a nonnegative value on success, or a negative value on error: + EINVAL: SME not supported. + + +7. ptrace extensions +--------------------- + +* A new regset NT_ARM_SSVE is defined for access to streaming mode SVE + state via PTRACE_GETREGSET and PTRACE_SETREGSET, this is documented in + sve.rst. + +* A new regset NT_ARM_ZA is defined for ZA state for access to ZA state via + PTRACE_GETREGSET and PTRACE_SETREGSET. + + Refer to [2] for definitions. + +The regset data starts with struct user_za_header, containing: + + size + + Size of the complete regset, in bytes. + This depends on vl and possibly on other things in the future. + + If a call to PTRACE_GETREGSET requests less data than the value of + size, the caller can allocate a larger buffer and retry in order to + read the complete regset. + + max_size + + Maximum size in bytes that the regset can grow to for the target + thread. The regset won't grow bigger than this even if the target + thread changes its vector length etc. + + vl + + Target thread's current streaming vector length, in bytes. + + max_vl + + Maximum possible streaming vector length for the target thread. + + flags + + Zero or more of the following flags, which have the same + meaning and behaviour as the corresponding PR_SET_VL_* flags: + + SME_PT_VL_INHERIT + + SME_PT_VL_ONEXEC (SETREGSET only). + +* The effects of changing the vector length and/or flags are equivalent to + those documented for PR_SME_SET_VL. + + The caller must make a further GETREGSET call if it needs to know what VL is + actually set by SETREGSET, unless is it known in advance that the requested + VL is supported. + +* The size and layout of the payload depends on the header fields. The + SME_PT_ZA_*() macros are provided to facilitate access to the data. + +* In either case, for SETREGSET it is permissible to omit the payload, in which + case the vector length and flags are changed and PSTATE.ZA is set to 0 + (along with any consequences of those changes). If a payload is provided + then PSTATE.ZA will be set to 1. + +* For SETREGSET, if the requested VL is not supported, the effect will be the + same as if the payload were omitted, except that an EIO error is reported. + No attempt is made to translate the payload data to the correct layout + for the vector length actually set. It is up to the caller to translate the + payload layout for the actual VL and retry. + +* The effect of writing a partial, incomplete payload is unspecified. + + +8. ELF coredump extensions +--------------------------- + +* NT_ARM_SSVE notes will be added to each coredump for + each thread of the dumped process. The contents will be equivalent to the + data that would have been read if a PTRACE_GETREGSET of the corresponding + type were executed for each thread when the coredump was generated. + +* A NT_ARM_ZA note will be added to each coredump for each thread of the + dumped process. The contents will be equivalent to the data that would have + been read if a PTRACE_GETREGSET of NT_ARM_ZA were executed for each thread + when the coredump was generated. + + +9. 
System runtime configuration +-------------------------------- + +* To mitigate the ABI impact of expansion of the signal frame, a policy + mechanism is provided for administrators, distro maintainers and developers + to set the default vector length for userspace processes: + +/proc/sys/abi/sme_default_vector_length + + Writing the text representation of an integer to this file sets the system + default vector length to the specified value, unless the value is greater + than the maximum vector length supported by the system in which case the + default vector length is set to that maximum. + + The result can be determined by reopening the file and reading its + contents. + + At boot, the default vector length is initially set to 32 or the maximum + supported vector length, whichever is smaller and supported. This + determines the initial vector length of the init process (PID 1). + + Reading this file returns the current system default vector length. + +* At every execve() call, the new vector length of the new process is set to + the system default vector length, unless + + * PR_SME_VL_INHERIT (or equivalently SME_PT_VL_INHERIT) is set for the + calling thread, or + + * a deferred vector length change is pending, established via the + PR_SME_SET_VL_ONEXEC flag (or SME_PT_VL_ONEXEC). + +* Modifying the system default vector length does not affect the vector length + of any existing process or thread that does not make an execve() call. + + +Appendix A. SME programmer's model (informative) +================================================= + +This section provides a minimal description of the additions made by SVE to the +ARMv8-A programmer's model that are relevant to this document. + +Note: This section is for information only and not intended to be complete or +to replace any architectural specification. + +A.1. Registers +--------------- + +In A64 state, SME adds the following: + +* A new mode, streaming mode, in which a subset of the normal FPSIMD and SVE + features are available. When supported EL0 software may enter and leave + streaming mode at any time. + + For best system performance it is strongly encouraged for software to enable + streaming mode only when it is actively being used. + +* A new vector length controlling the size of ZA and the Z registers when in + streaming mode, separately to the vector length used for SVE when not in + streaming mode. There is no requirement that either the currently selected + vector length or the set of vector lengths supported for the two modes in + a given system have any relationship. The streaming mode vector length + is referred to as SVL. + +* A new ZA matrix register. This is a square matrix of SVLxSVL bits. Most + operations on ZA require that streaming mode be enabled but ZA can be + enabled without streaming mode in order to load, save and retain data. + + For best system performance it is strongly encouraged for software to enable + ZA only when it is actively being used. + +* Two new 1 bit fields in PSTATE which may be controlled via the SMSTART and + SMSTOP instructions or by access to the SVCR system register: + + * PSTATE.ZA, if this is 1 then the ZA matrix is accessible and has valid + data while if it is 0 then ZA can not be accessed. When PSTATE.ZA is + changed from 0 to 1 all bits in ZA are cleared. + + * PSTATE.SM, if this is 1 then the PE is in streaming mode. When the value + of PSTATE.SM is changed then it is implementation defined if the subset + of the floating point register bits valid in both modes may be retained. 
+ Any other bits will be cleared. + + +References +========== + +[1] arch/arm64/include/uapi/asm/sigcontext.h + AArch64 Linux signal ABI definitions + +[2] arch/arm64/include/uapi/asm/ptrace.h + AArch64 Linux ptrace ABI definitions + +[3] Documentation/arm64/cpu-feature-registers.rst diff --git a/Documentation/arm64/sve.rst b/Documentation/arm64/sve.rst index 9d9a4de5bc34..93c2c2990584 100644 --- a/Documentation/arm64/sve.rst +++ b/Documentation/arm64/sve.rst @@ -7,7 +7,9 @@ Author: Dave Martin Date: 4 August 2017 This document outlines briefly the interface provided to userspace by Linux in -order to support use of the ARM Scalable Vector Extension (SVE). +order to support use of the ARM Scalable Vector Extension (SVE), including +interactions with Streaming SVE mode added by the Scalable Matrix Extension +(SME). This is an outline of the most important features and issues only and not intended to be exhaustive. @@ -23,6 +25,10 @@ model features for SVE is included in Appendix A. * SVE registers Z0..Z31, P0..P15 and FFR and the current vector length VL, are tracked per-thread. +* In streaming mode FFR is not accessible unless HWCAP2_SME_FA64 is present + in the system, when it is not supported and these interfaces are used to + access streaming mode FFR is read and written as zero. + * The presence of SVE is reported to userspace via HWCAP_SVE in the aux vector AT_HWCAP entry. Presence of this flag implies the presence of the SVE instructions and registers, and the Linux-specific system interfaces @@ -53,10 +59,19 @@ model features for SVE is included in Appendix A. which userspace can read using an MRS instruction. See elf_hwcaps.txt and cpu-feature-registers.txt for details. +* On hardware that supports the SME extensions, HWCAP2_SME will also be + reported in the AT_HWCAP2 aux vector entry. Among other things SME adds + streaming mode which provides a subset of the SVE feature set using a + separate SME vector length and the same Z/V registers. See sme.rst + for more details. + * Debuggers should restrict themselves to interacting with the target via the NT_ARM_SVE regset. The recommended way of detecting support for this regset is to connect to a target process first and then attempt a - ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov). + ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov). Note that when SME is + present and streaming SVE mode is in use the FPSIMD subset of registers + will be read via NT_ARM_SVE and NT_ARM_SVE writes will exit streaming mode + in the target. * Whenever SVE scalable register values (Zn, Pn, FFR) are exchanged in memory between userspace and the kernel, the register value is encoded in memory in @@ -126,6 +141,11 @@ the SVE instruction set architecture. are only present in fpsimd_context. For convenience, the content of V0..V31 is duplicated between sve_context and fpsimd_context. +* The record contains a flag field which includes a flag SVE_SIG_FLAG_SM which + if set indicates that the thread is in streaming mode and the vector length + and register data (if present) describe the streaming SVE data and vector + length. + * The signal frame record for SVE always contains basic metadata, in particular the thread's vector length (in sve_context.vl). @@ -170,6 +190,11 @@ When returning from a signal handler: the signal frame does not match the current vector length, the signal return attempt is treated as illegal, resulting in a forced SIGSEGV. 
+* It is permitted to enter or leave streaming mode by setting or clearing + the SVE_SIG_FLAG_SM flag but applications should take care to ensure that + when doing so sve_context.vl and any register data are appropriate for the + vector length in the new mode. + 6. prctl extensions -------------------- @@ -265,8 +290,14 @@ prctl(PR_SVE_GET_VL) 7. ptrace extensions --------------------- -* A new regset NT_ARM_SVE is defined for use with PTRACE_GETREGSET and - PTRACE_SETREGSET. +* New regsets NT_ARM_SVE and NT_ARM_SSVE are defined for use with + PTRACE_GETREGSET and PTRACE_SETREGSET. NT_ARM_SSVE describes the + streaming mode SVE registers and NT_ARM_SVE describes the + non-streaming mode SVE registers. + + In this description a register set is referred to as being "live" when + the target is in the appropriate streaming or non-streaming mode and is + using data beyond the subset shared with the FPSIMD Vn registers. Refer to [2] for definitions. @@ -297,7 +328,7 @@ The regset data starts with struct user_sve_header, containing: flags - either + at most one of SVE_PT_REGS_FPSIMD @@ -331,6 +362,10 @@ The regset data starts with struct user_sve_header, containing: SVE_PT_VL_ONEXEC (SETREGSET only). + If neither FPSIMD nor SVE flags are provided then no register + payload is available, this is only possible when SME is implemented. + + * The effects of changing the vector length and/or flags are equivalent to those documented for PR_SVE_SET_VL. @@ -346,6 +381,13 @@ The regset data starts with struct user_sve_header, containing: case only the vector length and flags are changed (along with any consequences of those changes). +* In systems supporting SME when in streaming mode a GETREGSET for + NT_REG_SVE will return only the user_sve_header with no register data, + similarly a GETREGSET for NT_REG_SSVE will not return any register data + when not in streaming mode. + +* A GETREGSET for NT_ARM_SSVE will never return SVE_PT_REGS_FPSIMD. + * For SETREGSET, if an SVE_PT_REGS_SVE payload is present and the requested VL is not supported, the effect will be the same as if the payload were omitted, except that an EIO error is reported. No @@ -355,17 +397,25 @@ The regset data starts with struct user_sve_header, containing: unspecified. It is up to the caller to translate the payload layout for the actual VL and retry. +* Where SME is implemented it is not possible to GETREGSET the register + state for normal SVE when in streaming mode, nor the streaming mode + register state when in normal mode, regardless of the implementation defined + behaviour of the hardware for sharing data between the two modes. + +* Any SETREGSET of NT_ARM_SVE will exit streaming mode if the target was in + streaming mode and any SETREGSET of NT_ARM_SSVE will enter streaming mode + if the target was not in streaming mode. + * The effect of writing a partial, incomplete payload is unspecified. 8. ELF coredump extensions --------------------------- -* A NT_ARM_SVE note will be added to each coredump for each thread of the - dumped process. The contents will be equivalent to the data that would have - been read if a PTRACE_GETREGSET of NT_ARM_SVE were executed for each thread - when the coredump was generated. - +* NT_ARM_SVE and NT_ARM_SSVE notes will be added to each coredump for + each thread of the dumped process. The contents will be equivalent to the + data that would have been read if a PTRACE_GETREGSET of the corresponding + type were executed for each thread when the coredump was generated. 9. 
System runtime configuration -------------------------------- From b4adc83b07706042ad6e6a767f6c04636db69bcc Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:13 +0100 Subject: [PATCH 009/145] arm64/sme: System register and exception syndrome definitions The arm64 Scalable Matrix Extension (SME) adds some new system registers, fields in existing system registers and exception syndromes. This patch adds definitions for these for use in future patches implementing support for this extension. Since SME will be the first user of FEAT_HCX in the kernel also include the definitions for enumerating it and the HCRX system register it adds. Signed-off-by: Mark Brown Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-6-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/esr.h | 12 +++++- arch/arm64/include/asm/kvm_arm.h | 1 + arch/arm64/include/asm/sysreg.h | 67 ++++++++++++++++++++++++++++++++ arch/arm64/kernel/traps.c | 1 + 4 files changed, 80 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index d52a0b269ee8..43872e0cfd1e 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -37,7 +37,8 @@ #define ESR_ELx_EC_ERET (0x1a) /* EL2 only */ /* Unallocated EC: 0x1B */ #define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */ -/* Unallocated EC: 0x1D - 0x1E */ +#define ESR_ELx_EC_SME (0x1D) +/* Unallocated EC: 0x1E */ #define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ #define ESR_ELx_EC_IABT_LOW (0x20) #define ESR_ELx_EC_IABT_CUR (0x21) @@ -327,6 +328,15 @@ #define ESR_ELx_CP15_32_ISS_SYS_CNTFRQ (ESR_ELx_CP15_32_ISS_SYS_VAL(0, 0, 14, 0) |\ ESR_ELx_CP15_32_ISS_DIR_READ) +/* + * ISS values for SME traps + */ + +#define ESR_ELx_SME_ISS_SME_DISABLED 0 +#define ESR_ELx_SME_ISS_ILL 1 +#define ESR_ELx_SME_ISS_SM_DISABLED 2 +#define ESR_ELx_SME_ISS_ZA_DISABLED 3 + #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 1767ded83888..13ae232ec4a1 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -279,6 +279,7 @@ #define CPTR_EL2_TCPAC (1U << 31) #define CPTR_EL2_TAM (1 << 30) #define CPTR_EL2_TTA (1 << 20) +#define CPTR_EL2_TSM (1 << 12) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) #define CPTR_EL2_TZ (1 << 8) #define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index fbf5f8bb9055..bebfdd27296a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -118,6 +118,10 @@ * System registers, organised loosely by encoding but grouped together * where the architected name contains an index. e.g. ID_MMFR_EL1. 
*/ +#define SYS_SVCR_SMSTOP_SM_EL0 sys_reg(0, 3, 4, 2, 3) +#define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3) +#define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3) + #define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2) #define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0) #define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2) @@ -181,6 +185,7 @@ #define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0) #define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1) #define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4) +#define SYS_ID_AA64SMFR0_EL1 sys_reg(3, 0, 0, 4, 5) #define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) #define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) @@ -204,6 +209,8 @@ #define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) #define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) +#define SYS_SMPRI_EL1 sys_reg(3, 0, 1, 2, 4) +#define SYS_SMCR_EL1 sys_reg(3, 0, 1, 2, 6) #define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) #define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1) @@ -396,6 +403,8 @@ #define TRBIDR_ALIGN_MASK GENMASK(3, 0) #define TRBIDR_ALIGN_SHIFT 0 +#define SMPRI_EL1_PRIORITY_MASK 0xf + #define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) #define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) @@ -451,8 +460,13 @@ #define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) #define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) #define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4) +#define SYS_SMIDR_EL1 sys_reg(3, 1, 0, 0, 6) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) +#define SYS_SMIDR_EL1_IMPLEMENTER_SHIFT 24 +#define SYS_SMIDR_EL1_SMPS_SHIFT 15 +#define SYS_SMIDR_EL1_AFFINITY_SHIFT 0 + #define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0) #define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1) @@ -461,6 +475,10 @@ #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) +#define SYS_SVCR_EL0 sys_reg(3, 3, 4, 2, 2) +#define SYS_SVCR_EL0_ZA_MASK 2 +#define SYS_SVCR_EL0_SM_MASK 1 + #define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0) #define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1) #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) @@ -477,6 +495,7 @@ #define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2) #define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3) +#define SYS_TPIDR2_EL0 sys_reg(3, 3, 13, 0, 5) #define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7) @@ -546,6 +565,9 @@ #define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6) #define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) +#define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2) +#define SYS_SMPRIMAP_EL2 sys_reg(3, 4, 1, 2, 5) +#define SYS_SMCR_EL2 sys_reg(3, 4, 1, 2, 6) #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) #define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) @@ -605,6 +627,7 @@ #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) #define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) #define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) +#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) @@ -628,6 +651,7 @@ #define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2) /* Common SCTLR_ELx flags. 
*/ +#define SCTLR_ELx_ENTP2 (BIT(60)) #define SCTLR_ELx_DSSBS (BIT(44)) #define SCTLR_ELx_ATA (BIT(43)) @@ -836,6 +860,7 @@ #define ID_AA64PFR0_ELx_32BIT_64BIT 0x2 /* id_aa64pfr1 */ +#define ID_AA64PFR1_SME_SHIFT 24 #define ID_AA64PFR1_MPAMFRAC_SHIFT 16 #define ID_AA64PFR1_RASFRAC_SHIFT 12 #define ID_AA64PFR1_MTE_SHIFT 8 @@ -846,6 +871,7 @@ #define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 #define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 #define ID_AA64PFR1_BT_BTI 0x1 +#define ID_AA64PFR1_SME 1 #define ID_AA64PFR1_MTE_NI 0x0 #define ID_AA64PFR1_MTE_EL0 0x1 @@ -874,6 +900,23 @@ #define ID_AA64ZFR0_AES_PMULL 0x2 #define ID_AA64ZFR0_SVEVER_SVE2 0x1 +/* id_aa64smfr0 */ +#define ID_AA64SMFR0_FA64_SHIFT 63 +#define ID_AA64SMFR0_I16I64_SHIFT 52 +#define ID_AA64SMFR0_F64F64_SHIFT 48 +#define ID_AA64SMFR0_I8I32_SHIFT 36 +#define ID_AA64SMFR0_F16F32_SHIFT 35 +#define ID_AA64SMFR0_B16F32_SHIFT 34 +#define ID_AA64SMFR0_F32F32_SHIFT 32 + +#define ID_AA64SMFR0_FA64 0x1 +#define ID_AA64SMFR0_I16I64 0x4 +#define ID_AA64SMFR0_F64F64 0x1 +#define ID_AA64SMFR0_I8I32 0x4 +#define ID_AA64SMFR0_F16F32 0x1 +#define ID_AA64SMFR0_B16F32 0x1 +#define ID_AA64SMFR0_F32F32 0x1 + /* id_aa64mmfr0 */ #define ID_AA64MMFR0_ECV_SHIFT 60 #define ID_AA64MMFR0_FGT_SHIFT 56 @@ -926,6 +969,7 @@ /* id_aa64mmfr1 */ #define ID_AA64MMFR1_ECBHB_SHIFT 60 +#define ID_AA64MMFR1_HCX_SHIFT 40 #define ID_AA64MMFR1_AFP_SHIFT 44 #define ID_AA64MMFR1_ETS_SHIFT 36 #define ID_AA64MMFR1_TWED_SHIFT 32 @@ -1119,9 +1163,24 @@ #define ZCR_ELx_LEN_SIZE 9 #define ZCR_ELx_LEN_MASK 0x1ff +#define SMCR_ELx_FA64_SHIFT 31 +#define SMCR_ELx_FA64_MASK (1 << SMCR_ELx_FA64_SHIFT) + +/* + * The SMCR_ELx_LEN_* definitions intentionally include bits [8:4] which + * are reserved by the SME architecture for future expansion of the LEN + * field, with compatible semantics. + */ +#define SMCR_ELx_LEN_SHIFT 0 +#define SMCR_ELx_LEN_SIZE 9 +#define SMCR_ELx_LEN_MASK 0x1ff + #define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */ #define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */ +#define CPACR_EL1_SMEN_EL1EN (BIT(24)) /* enable EL1 access */ +#define CPACR_EL1_SMEN_EL0EN (BIT(25)) /* enable EL0 access, if EL1EN set */ + #define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */ #define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */ @@ -1170,6 +1229,8 @@ #define TRFCR_ELx_ExTRE BIT(1) #define TRFCR_ELx_E0TRE BIT(0) +/* HCRX_EL2 definitions */ +#define HCRX_EL2_SMPME_MASK (1 << 5) /* GIC Hypervisor interface registers */ /* ICH_MISR_EL2 bit definitions */ @@ -1233,6 +1294,12 @@ #define ICH_VTR_TDS_SHIFT 19 #define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT) +/* HFG[WR]TR_EL2 bit definitions */ +#define HFGxTR_EL2_nTPIDR2_EL0_SHIFT 55 +#define HFGxTR_EL2_nTPIDR2_EL0_MASK BIT_MASK(HFGxTR_EL2_nTPIDR2_EL0_SHIFT) +#define HFGxTR_EL2_nSMPRI_EL1_SHIFT 54 +#define HFGxTR_EL2_nSMPRI_EL1_MASK BIT_MASK(HFGxTR_EL2_nSMPRI_EL1_SHIFT) + #define ARM64_FEATURE_FIELD_BITS 4 /* Create a mask for the feature bits of the specified feature. 
*/ diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 0529fd57567e..6751621e5bea 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -821,6 +821,7 @@ static const char *esr_class_str[] = { [ESR_ELx_EC_SVE] = "SVE", [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB", [ESR_ELx_EC_FPAC] = "FPAC", + [ESR_ELx_EC_SME] = "SME", [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", From ca8a4ebcff4465f0272637433c789a5e4a272626 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:14 +0100 Subject: [PATCH 010/145] arm64/sme: Manually encode SME instructions As with SVE rather than impose ambitious toolchain requirements for SME we manually encode the few instructions which we require in order to perform the work the kernel needs to do. The instructions used to save and restore context are provided as assembler macros while those for entering and leaving streaming mode are done in asm volatile blocks since they are expected to be used from C. We could do the SMSTART and SMSTOP operations with read/modify/write cycles on SVCR but using the aliases provided for individual field accesses should be slightly faster. These instructions are aliases for MSR but since our minimum toolchain requirements are old enough to mean that we can't use the sX_X_cX_cX_X form and they always use xzr rather than taking a value like write_sysreg_s() wants we just use .inst. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-7-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 25 +++++++++++++ arch/arm64/include/asm/fpsimdmacros.h | 54 +++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index cb24385e3632..6e2dc9dcbf49 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -249,6 +249,31 @@ static inline void sve_setup(void) { } #endif /* ! CONFIG_ARM64_SVE */ +#ifdef CONFIG_ARM64_SME + +static inline void sme_smstart_sm(void) +{ + asm volatile(__msr_s(SYS_SVCR_SMSTART_SM_EL0, "xzr")); +} + +static inline void sme_smstop_sm(void) +{ + asm volatile(__msr_s(SYS_SVCR_SMSTOP_SM_EL0, "xzr")); +} + +static inline void sme_smstop(void) +{ + asm volatile(__msr_s(SYS_SVCR_SMSTOP_SMZA_EL0, "xzr")); +} + +#else + +static inline void sme_smstart_sm(void) { } +static inline void sme_smstop_sm(void) { } +static inline void sme_smstop(void) { } + +#endif /* ! CONFIG_ARM64_SME */ + /* For use by EFI runtime services calls only */ extern void __efi_fpsimd_begin(void); extern void __efi_fpsimd_end(void); diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index 2509d7dde55a..2e9a33155081 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -93,6 +93,12 @@ .endif .endm +.macro _sme_check_wv v + .if (\v) < 12 || (\v) > 15 + .error "Bad vector select register \v." 
+ .endif +.endm + /* SVE instruction encodings for non-SVE-capable assemblers */ /* (pre binutils 2.28, all kernel capable clang versions support SVE) */ @@ -174,6 +180,54 @@ | (\np) .endm +/* SME instruction encodings for non-SME-capable assemblers */ +/* (pre binutils 2.38/LLVM 13) */ + +/* RDSVL X\nx, #\imm */ +.macro _sme_rdsvl nx, imm + _check_general_reg \nx + _check_num (\imm), -0x20, 0x1f + .inst 0x04bf5800 \ + | (\nx) \ + | (((\imm) & 0x3f) << 5) +.endm + +/* + * STR (vector from ZA array): + * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _sme_str_zav nw, nxbase, offset=0 + _sme_check_wv \nw + _check_general_reg \nxbase + _check_num (\offset), -0x100, 0xff + .inst 0xe1200000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + +/* + * LDR (vector to ZA array): + * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _sme_ldr_zav nw, nxbase, offset=0 + _sme_check_wv \nw + _check_general_reg \nxbase + _check_num (\offset), -0x100, 0xff + .inst 0xe1000000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + +/* + * Zero the entire ZA array + * ZERO ZA + */ +.macro zero_za + .inst 0xc00800ff +.endm + .macro __for from:req, to:req .if (\from) == (\to) _for__body %\from From b2cf6a23289b3268cc7915a09c0c8372147b2727 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:15 +0100 Subject: [PATCH 011/145] arm64/sme: Early CPU setup for SME SME requires similar setup to that for SVE: disable traps to EL2 and make sure that the maximum vector length is available to EL1, for SME we have two traps - one for SME itself and one for TPIDR2. In addition since we currently make no active use of priority control for SCMUs we map all SME priorities lower ELs may configure to 0, the architecture specified minimum priority, to ensure that nothing we manage is able to configure itself to consume excessive resources. This will need to be revisited should there be a need to manage SME priorities at runtime. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-8-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/el2_setup.h | 64 ++++++++++++++++++++++++++++-- 1 file changed, 60 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index c31be7eda9df..fabdbde0fe02 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -143,6 +143,50 @@ .Lskip_sve_\@: .endm +/* SME register access and priority mapping */ +.macro __init_el2_nvhe_sme + mrs x1, id_aa64pfr1_el1 + ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4 + cbz x1, .Lskip_sme_\@ + + bic x0, x0, #CPTR_EL2_TSM // Also disable SME traps + msr cptr_el2, x0 // Disable copro. traps to EL2 + isb + + mrs x1, sctlr_el2 + orr x1, x1, #SCTLR_ELx_ENTP2 // Disable TPIDR2 traps + msr sctlr_el2, x1 + isb + + mov x1, #0 // SMCR controls + + mrs_s x2, SYS_ID_AA64SMFR0_EL1 + ubfx x2, x2, #ID_AA64SMFR0_FA64_SHIFT, #1 // Full FP in SM? + cbz x2, .Lskip_sme_fa64_\@ + + orr x1, x1, SMCR_ELx_FA64_MASK +.Lskip_sme_fa64_\@: + + orr x1, x1, #SMCR_ELx_LEN_MASK // Enable full SME vector + msr_s SYS_SMCR_EL2, x1 // length for EL1. + + mrs_s x1, SYS_SMIDR_EL1 // Priority mapping supported? + ubfx x1, x1, #SYS_SMIDR_EL1_SMPS_SHIFT, #1 + cbz x1, .Lskip_sme_\@ + + msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal + + mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present? 
+ ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4 + cbz x1, .Lskip_sme_\@ + + mrs_s x1, SYS_HCRX_EL2 + orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping + msr_s SYS_HCRX_EL2, x1 + +.Lskip_sme_\@: +.endm + /* Disable any fine grained traps */ .macro __init_el2_fgt mrs x1, id_aa64mmfr0_el1 @@ -153,15 +197,26 @@ mrs x1, id_aa64dfr0_el1 ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 cmp x1, #3 - b.lt .Lset_fgt_\@ + b.lt .Lset_debug_fgt_\@ /* Disable PMSNEVFR_EL1 read and write traps */ orr x0, x0, #(1 << 62) -.Lset_fgt_\@: +.Lset_debug_fgt_\@: msr_s SYS_HDFGRTR_EL2, x0 msr_s SYS_HDFGWTR_EL2, x0 - msr_s SYS_HFGRTR_EL2, xzr - msr_s SYS_HFGWTR_EL2, xzr + + mov x0, xzr + mrs x1, id_aa64pfr1_el1 + ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4 + cbz x1, .Lset_fgt_\@ + + /* Disable nVHE traps of TPIDR2 and SMPRI */ + orr x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK + orr x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK + +.Lset_fgt_\@: + msr_s SYS_HFGRTR_EL2, x0 + msr_s SYS_HFGWTR_EL2, x0 msr_s SYS_HFGITR_EL2, xzr mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU @@ -196,6 +251,7 @@ __init_el2_nvhe_idregs __init_el2_nvhe_cptr __init_el2_nvhe_sve + __init_el2_nvhe_sme __init_el2_fgt __init_el2_nvhe_prepare_eret .endm From 5e64b862c4823ab53aac028042abd918c2f27041 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:16 +0100 Subject: [PATCH 012/145] arm64/sme: Basic enumeration support This patch introduces basic cpufeature support for discovering the presence of the Scalable Matrix Extension. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-9-broonie@kernel.org Signed-off-by: Catalin Marinas --- Documentation/arm64/elf_hwcaps.rst | 33 ++++++++++++++++ arch/arm64/include/asm/cpu.h | 1 + arch/arm64/include/asm/cpufeature.h | 12 ++++++ arch/arm64/include/asm/fpsimd.h | 2 + arch/arm64/include/asm/hwcap.h | 8 ++++ arch/arm64/include/uapi/asm/hwcap.h | 8 ++++ arch/arm64/kernel/cpufeature.c | 59 +++++++++++++++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 9 +++++ arch/arm64/kernel/fpsimd.c | 30 +++++++++++++++ arch/arm64/tools/cpucaps | 2 + 10 files changed, 164 insertions(+) diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst index a8f30963e550..f8d818eaaff5 100644 --- a/Documentation/arm64/elf_hwcaps.rst +++ b/Documentation/arm64/elf_hwcaps.rst @@ -264,6 +264,39 @@ HWCAP2_MTE3 Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0011, as described by Documentation/arm64/memory-tagging-extension.rst. +HWCAP2_SME + + Functionality implied by ID_AA64PFR1_EL1.SME == 0b0001, as described + by Documentation/arm64/sme.rst. + +HWCAP2_SME_I16I64 + + Functionality implied by ID_AA64SMFR0_EL1.I16I64 == 0b1111. + +HWCAP2_SME_F64F64 + + Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1. + +HWCAP2_SME_I8I32 + + Functionality implied by ID_AA64SMFR0_EL1.I8I32 == 0b1111. + +HWCAP2_SME_F16F32 + + Functionality implied by ID_AA64SMFR0_EL1.F16F32 == 0b1. + +HWCAP2_SME_B16F32 + + Functionality implied by ID_AA64SMFR0_EL1.B16F32 == 0b1. + +HWCAP2_SME_F32F32 + + Functionality implied by ID_AA64SMFR0_EL1.F32F32 == 0b1. + +HWCAP2_SME_FA64 + + Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1. + 4. 
Unused AT_HWCAP bits ----------------------- diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index a58e366f0b07..d08062bcb9c1 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -58,6 +58,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64pfr0; u64 reg_id_aa64pfr1; u64 reg_id_aa64zfr0; + u64 reg_id_aa64smfr0; struct cpuinfo_32bit aarch32; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index c62e7e5e2f0c..8ac12e4094aa 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -759,6 +759,18 @@ static __always_inline bool system_supports_sve(void) cpus_have_const_cap(ARM64_SVE); } +static __always_inline bool system_supports_sme(void) +{ + return IS_ENABLED(CONFIG_ARM64_SME) && + cpus_have_const_cap(ARM64_SME); +} + +static __always_inline bool system_supports_fa64(void) +{ + return IS_ENABLED(CONFIG_ARM64_SME) && + cpus_have_const_cap(ARM64_SME_FA64); +} + static __always_inline bool system_supports_cnp(void) { return IS_ENABLED(CONFIG_ARM64_CNP) && diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 6e2dc9dcbf49..2e8ef00e7520 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -74,6 +74,8 @@ extern void sve_set_vq(unsigned long vq_minus_1); struct arm64_cpu_capabilities; extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); +extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused); +extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern u64 read_zcr_features(void); diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 8db5ec0089db..9f0ce004fdbc 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -109,6 +109,14 @@ #define KERNEL_HWCAP_AFP __khwcap2_feature(AFP) #define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES) #define KERNEL_HWCAP_MTE3 __khwcap2_feature(MTE3) +#define KERNEL_HWCAP_SME __khwcap2_feature(SME) +#define KERNEL_HWCAP_SME_I16I64 __khwcap2_feature(SME_I16I64) +#define KERNEL_HWCAP_SME_F64F64 __khwcap2_feature(SME_F64F64) +#define KERNEL_HWCAP_SME_I8I32 __khwcap2_feature(SME_I8I32) +#define KERNEL_HWCAP_SME_F16F32 __khwcap2_feature(SME_F16F32) +#define KERNEL_HWCAP_SME_B16F32 __khwcap2_feature(SME_B16F32) +#define KERNEL_HWCAP_SME_F32F32 __khwcap2_feature(SME_F32F32) +#define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 99cb5d383048..b0256cec63b5 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -79,5 +79,13 @@ #define HWCAP2_AFP (1 << 20) #define HWCAP2_RPRES (1 << 21) #define HWCAP2_MTE3 (1 << 22) +#define HWCAP2_SME (1 << 23) +#define HWCAP2_SME_I16I64 (1 << 24) +#define HWCAP2_SME_F64F64 (1 << 25) +#define HWCAP2_SME_I8I32 (1 << 26) +#define HWCAP2_SME_F16F32 (1 << 27) +#define HWCAP2_SME_B16F32 (1 << 28) +#define HWCAP2_SME_F32F32 (1 << 29) +#define HWCAP2_SME_FA64 (1 << 30) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d72c4b4d389c..0f2d7ddd69ae 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -261,6 +261,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { + 
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE), @@ -293,6 +295,24 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_FA64_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_I16I64_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F64F64_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_I8I32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F16F32_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_B16F32_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F32F32_SHIFT, 1, 0), + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0), @@ -645,6 +665,7 @@ static const struct __ftr_reg_entry { ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1, &id_aa64pfr1_override), ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0), + ARM64_FTR_REG(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0), /* Op1 = 0, CRn = 0, CRm = 5 */ ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), @@ -960,6 +981,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); + init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0); if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) init_32bit_cpu_features(&info->aarch32); @@ -2442,6 +2464,33 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .min_field_value = 1, }, +#ifdef CONFIG_ARM64_SME + { + .desc = "Scalable Matrix Extension", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_SME, + .sys_reg = SYS_ID_AA64PFR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR1_SME_SHIFT, + .field_width = 4, + .min_field_value = ID_AA64PFR1_SME, + .matches = has_cpuid_feature, + .cpu_enable = sme_kernel_enable, + }, + /* FA64 should be sorted after the base SME capability */ + { + .desc = "FA64", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_SME_FA64, + .sys_reg = SYS_ID_AA64SMFR0_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64SMFR0_FA64_SHIFT, + .field_width = 1, + .min_field_value = ID_AA64SMFR0_FA64, + .matches = has_cpuid_feature, + .cpu_enable = fa64_kernel_enable, + }, +#endif /* CONFIG_ARM64_SME */ {}, }; @@ -2575,6 +2624,16 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, 
FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), +#ifdef CONFIG_ARM64_SME + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_FA64, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_I16I64, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F64F64, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_I8I32, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F16F32, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_B16F32, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F32F32, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), +#endif /* CONFIG_ARM64_SME */ {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 330b92ea863a..a73fe2888b7e 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -98,6 +98,14 @@ static const char *const hwcap_str[] = { [KERNEL_HWCAP_AFP] = "afp", [KERNEL_HWCAP_RPRES] = "rpres", [KERNEL_HWCAP_MTE3] = "mte3", + [KERNEL_HWCAP_SME] = "sme", + [KERNEL_HWCAP_SME_I16I64] = "smei16i64", + [KERNEL_HWCAP_SME_F64F64] = "smef64f64", + [KERNEL_HWCAP_SME_I8I32] = "smei8i32", + [KERNEL_HWCAP_SME_F16F32] = "smef16f32", + [KERNEL_HWCAP_SME_B16F32] = "smeb16f32", + [KERNEL_HWCAP_SME_F32F32] = "smef32f32", + [KERNEL_HWCAP_SME_FA64] = "smefa64", }; #ifdef CONFIG_COMPAT @@ -401,6 +409,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1); + info->reg_id_aa64smfr0 = read_cpuid(ID_AA64SMFR0_EL1); if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) info->reg_gmid = read_cpuid(GMID_EL1); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 47af76e53221..e4fba0bfb55e 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -993,6 +993,32 @@ void fpsimd_release_task(struct task_struct *dead_task) #endif /* CONFIG_ARM64_SVE */ +#ifdef CONFIG_ARM64_SME + +void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) +{ + /* Set priority for all PEs to architecturally defined minimum */ + write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK, + SYS_SMPRI_EL1); + + /* Allow SME in kernel */ + write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1); + isb(); +} + +/* + * This must be called after sme_kernel_enable(), we rely on the + * feature table being sorted to ensure this. 
+ */ +void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) +{ + /* Allow use of FA64 */ + write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK, + SYS_SMCR_EL1); +} + +#endif /* CONFIG_ARM64_SVE */ + /* * Trapped SVE access * @@ -1538,6 +1564,10 @@ static int __init fpsimd_init(void) if (!cpu_have_named_feature(ASIMD)) pr_notice("Advanced SIMD is not implemented\n"); + + if (cpu_have_named_feature(SME) && !cpu_have_named_feature(SVE)) + pr_notice("SME is implemented but not SVE\n"); + return sve_sysctl_init(); } core_initcall(fpsimd_init); diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 3ed418f70e3b..e52b289a27c2 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -43,6 +43,8 @@ KVM_PROTECTED_MODE MISMATCHED_CACHE_TYPE MTE MTE_ASYMM +SME +SME_FA64 SPECTRE_V2 SPECTRE_V3A SPECTRE_V4 From b42990d3bf77cc29d7c33e21518c1f806dae6b21 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:17 +0100 Subject: [PATCH 013/145] arm64/sme: Identify supported SME vector lengths at boot The vector lengths used for SME are controlled through a similar set of registers to those for SVE and enumerated using a similar algorithm, with some slight differences because, unlike SVE, there are no restrictions on which combinations of vector lengths can be supported nor any mandatory vector lengths which must be implemented. Add a new vector type and implement support for enumerating it. One slightly awkward feature is that we need to read the current vector length using a different instruction (or enter streaming mode, which would have the same issue and be higher cost). Rather than add an ops structure we add special cases directly in the otherwise generic vec_probe_vqs() function; this is a bit inelegant but it's the only place where this is an issue.
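For reference, the resulting probe loop can be sketched in C roughly as follows. This is illustrative only and not part of the patch; it reuses the helpers already present in fpsimd.c and mirrors the hunk further down:

static void vec_probe_vqs_sketch(struct vl_info *info,
				 DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;

	bitmap_zero(map, SVE_VQ_MAX);

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_vl(info->type, vq - 1);	/* self-syncing */

		/* SME must read the resulting VL back with RDSVL */
		if (info->type == ARM64_VEC_SME)
			vl = sme_get_vl();
		else
			vl = sve_get_vl();

		/* A read-back larger than requested means we went below
		 * the minimum supported VL, so stop. */
		if (sve_vq_from_vl(vl) > vq)
			break;

		vq = sve_vq_from_vl(vl);	/* skip intervening lengths */
		set_bit(__vq_to_bit(vq), map);
	}
}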
Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-10-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpu.h | 3 + arch/arm64/include/asm/cpufeature.h | 7 ++ arch/arm64/include/asm/fpsimd.h | 26 ++++++ arch/arm64/include/asm/processor.h | 1 + arch/arm64/kernel/cpufeature.c | 47 +++++++++++ arch/arm64/kernel/cpuinfo.c | 4 + arch/arm64/kernel/entry-fpsimd.S | 9 ++ arch/arm64/kernel/fpsimd.c | 123 +++++++++++++++++++++++++++- 8 files changed, 218 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index d08062bcb9c1..115cdec1ae87 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -64,6 +64,9 @@ struct cpuinfo_arm64 { /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */ u64 reg_zcr; + + /* pseudo-SMCR for recording maximum SMCR_EL1 LEN value: */ + u64 reg_smcr; }; DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data); diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 8ac12e4094aa..5ddfae233ea5 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -622,6 +622,13 @@ static inline bool id_aa64pfr0_sve(u64 pfr0) return val > 0; } +static inline bool id_aa64pfr1_sme(u64 pfr1) +{ + u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT); + + return val > 0; +} + static inline bool id_aa64pfr1_mte(u64 pfr1) { u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT); diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 2e8ef00e7520..32cd682258d9 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -78,6 +78,7 @@ extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern u64 read_zcr_features(void); +extern u64 read_smcr_features(void); /* * Helpers to translate bit indices in sve_vq_map to VQ values (and @@ -172,6 +173,12 @@ static inline void write_vl(enum vec_type type, u64 val) tmp = read_sysreg_s(SYS_ZCR_EL1) & ~ZCR_ELx_LEN_MASK; write_sysreg_s(tmp | val, SYS_ZCR_EL1); break; +#endif +#ifdef CONFIG_ARM64_SME + case ARM64_VEC_SME: + tmp = read_sysreg_s(SYS_SMCR_EL1) & ~SMCR_ELx_LEN_MASK; + write_sysreg_s(tmp | val, SYS_SMCR_EL1); + break; #endif default: WARN_ON_ONCE(1); @@ -268,12 +275,31 @@ static inline void sme_smstop(void) asm volatile(__msr_s(SYS_SVCR_SMSTOP_SMZA_EL0, "xzr")); } +extern void __init sme_setup(void); + +static inline int sme_max_vl(void) +{ + return vec_max_vl(ARM64_VEC_SME); +} + +static inline int sme_max_virtualisable_vl(void) +{ + return vec_max_virtualisable_vl(ARM64_VEC_SME); +} + +extern unsigned int sme_get_vl(void); + #else static inline void sme_smstart_sm(void) { } static inline void sme_smstop_sm(void) { } static inline void sme_smstop(void) { } +static inline void sme_setup(void) { } +static inline unsigned int sme_get_vl(void) { return 0; } +static inline int sme_max_vl(void) { return 0; } +static inline int sme_max_virtualisable_vl(void) { return 0; } + #endif /* ! 
CONFIG_ARM64_SME */ /* For use by EFI runtime services calls only */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 73e38d9a540c..abf34a9c2eab 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -118,6 +118,7 @@ struct debug_info { enum vec_type { ARM64_VEC_SVE = 0, + ARM64_VEC_SME, ARM64_VEC_MAX, }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0f2d7ddd69ae..082b3f48cbfd 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -581,6 +581,12 @@ static const struct arm64_ftr_bits ftr_zcr[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_smcr[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_SIZE, 0), /* LEN */ + ARM64_FTR_END, +}; + /* * Common ftr bits for a 32bit register with all hidden, strict * attributes, with 4bit feature fields and a default safe value of @@ -687,6 +693,7 @@ static const struct __ftr_reg_entry { /* Op1 = 0, CRn = 1, CRm = 2 */ ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr), + ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr), /* Op1 = 1, CRn = 0, CRm = 0 */ ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid), @@ -991,6 +998,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) vec_init_vq_map(ARM64_VEC_SVE); } + if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) { + init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr); + if (IS_ENABLED(CONFIG_ARM64_SME)) + vec_init_vq_map(ARM64_VEC_SME); + } + if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); @@ -1217,6 +1230,9 @@ void update_cpu_features(int cpu, taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu, info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0); + taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu, + info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0); + if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu, info->reg_zcr, boot->reg_zcr); @@ -1227,6 +1243,16 @@ void update_cpu_features(int cpu, vec_update_vq_map(ARM64_VEC_SVE); } + if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) { + taint |= check_update_ftr_reg(SYS_SMCR_EL1, cpu, + info->reg_smcr, boot->reg_smcr); + + /* Probe vector lengths, unless we already gave up on SME */ + if (id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1)) && + !system_capabilities_finalized()) + vec_update_vq_map(ARM64_VEC_SME); + } + /* * The kernel uses the LDGM/STGM instructions and the number of tags * they read/write depends on the GMID_EL1.BS field. 
Check that the @@ -2931,6 +2957,23 @@ static void verify_sve_features(void) /* Add checks on other ZCR bits here if necessary */ } +static void verify_sme_features(void) +{ + u64 safe_smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1); + u64 smcr = read_smcr_features(); + + unsigned int safe_len = safe_smcr & SMCR_ELx_LEN_MASK; + unsigned int len = smcr & SMCR_ELx_LEN_MASK; + + if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SME)) { + pr_crit("CPU%d: SME: vector length support mismatch\n", + smp_processor_id()); + cpu_die_early(); + } + + /* Add checks on other SMCR bits here if necessary */ +} + static void verify_hyp_capabilities(void) { u64 safe_mmfr1, mmfr0, mmfr1; @@ -2983,6 +3026,9 @@ static void verify_local_cpu_capabilities(void) if (system_supports_sve()) verify_sve_features(); + if (system_supports_sme()) + verify_sme_features(); + if (is_hyp_mode_available()) verify_hyp_capabilities(); } @@ -3100,6 +3146,7 @@ void __init setup_cpu_features(void) pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); sve_setup(); + sme_setup(); minsigstksz_setup(); /* Advertise that we have computed the system capabilities */ diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index a73fe2888b7e..8a8136a096ac 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -421,6 +421,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) id_aa64pfr0_sve(info->reg_id_aa64pfr0)) info->reg_zcr = read_zcr_features(); + if (IS_ENABLED(CONFIG_ARM64_SME) && + id_aa64pfr1_sme(info->reg_id_aa64pfr1)) + info->reg_smcr = read_smcr_features(); + cpuinfo_detect_icache_policy(info); } diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index dc242e269f9a..deee5f01462e 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -86,3 +86,12 @@ SYM_FUNC_START(sve_flush_live) SYM_FUNC_END(sve_flush_live) #endif /* CONFIG_ARM64_SVE */ + +#ifdef CONFIG_ARM64_SME + +SYM_FUNC_START(sme_get_vl) + _sme_rdsvl 0, 1 + ret +SYM_FUNC_END(sme_get_vl) + +#endif /* CONFIG_ARM64_SME */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index e4fba0bfb55e..5e5fbd9cba75 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -136,6 +136,12 @@ __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = { .max_virtualisable_vl = SVE_VL_MIN, }, #endif +#ifdef CONFIG_ARM64_SME + [ARM64_VEC_SME] = { + .type = ARM64_VEC_SME, + .name = "SME", + }, +#endif }; static unsigned int vec_vl_inherit_flag(enum vec_type type) @@ -186,6 +192,20 @@ extern void __percpu *efi_sve_state; #endif /* ! 
CONFIG_ARM64_SVE */ +#ifdef CONFIG_ARM64_SME + +static int get_sme_default_vl(void) +{ + return get_default_vl(ARM64_VEC_SME); +} + +static void set_sme_default_vl(int val) +{ + set_default_vl(ARM64_VEC_SME, val); +} + +#endif + DEFINE_PER_CPU(bool, fpsimd_context_busy); EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy); @@ -409,6 +429,8 @@ static unsigned int find_supported_vector_length(enum vec_type type, if (vl > max_vl) vl = max_vl; + if (vl < info->min_vl) + vl = info->min_vl; bit = find_next_bit(info->vq_map, SVE_VQ_MAX, __vq_to_bit(sve_vq_from_vl(vl))); @@ -770,7 +792,23 @@ static void vec_probe_vqs(struct vl_info *info, for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) { write_vl(info->type, vq - 1); /* self-syncing */ - vl = sve_get_vl(); + + switch (info->type) { + case ARM64_VEC_SVE: + vl = sve_get_vl(); + break; + case ARM64_VEC_SME: + vl = sme_get_vl(); + break; + default: + vl = 0; + break; + } + + /* Minimum VL identified? */ + if (sve_vq_from_vl(vl) > vq) + break; + vq = sve_vq_from_vl(vl); /* skip intervening lengths */ set_bit(__vq_to_bit(vq), map); } @@ -1017,7 +1055,88 @@ void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) SYS_SMCR_EL1); } -#endif /* CONFIG_ARM64_SVE */ +/* + * Read the pseudo-SMCR used by cpufeatures to identify the supported + * vector length. + * + * Use only if SME is present. + * This function clobbers the SME vector length. + */ +u64 read_smcr_features(void) +{ + u64 smcr; + unsigned int vq_max; + + sme_kernel_enable(NULL); + sme_smstart_sm(); + + /* + * Set the maximum possible VL. + */ + write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK, + SYS_SMCR_EL1); + + smcr = read_sysreg_s(SYS_SMCR_EL1); + smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */ + vq_max = sve_vq_from_vl(sve_get_vl()); + smcr |= vq_max - 1; /* set LEN field to maximum effective value */ + + sme_smstop_sm(); + + return smcr; +} + +void __init sme_setup(void) +{ + struct vl_info *info = &vl_info[ARM64_VEC_SME]; + u64 smcr; + int min_bit; + + if (!system_supports_sme()) + return; + + /* + * SME doesn't require any particular vector length be + * supported but it does require at least one. We should have + * disabled the feature entirely while bringing up CPUs but + * let's double check here. + */ + WARN_ON(bitmap_empty(info->vq_map, SVE_VQ_MAX)); + + min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX); + info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit)); + + smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1); + info->max_vl = sve_vl_from_vq((smcr & SMCR_ELx_LEN_MASK) + 1); + + /* + * Sanity-check that the max VL we determined through CPU features + * corresponds properly to sme_vq_map. If not, do our best: + */ + if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SME, + info->max_vl))) + info->max_vl = find_supported_vector_length(ARM64_VEC_SME, + info->max_vl); + + WARN_ON(info->min_vl > info->max_vl); + + /* + * For the default VL, pick the maximum supported value <= 32 + * (256 bits) if there is one since this is guaranteed not to + * grow the signal frame when in streaming mode, otherwise the + * minimum available VL will be used. 
+ */ + set_sme_default_vl(find_supported_vector_length(ARM64_VEC_SME, 32)); + + pr_info("SME: minimum available vector length %u bytes per vector\n", + info->min_vl); + pr_info("SME: maximum available vector length %u bytes per vector\n", + info->max_vl); + pr_info("SME: default vector length %u bytes per vector\n", + get_sme_default_vl()); +} + +#endif /* CONFIG_ARM64_SME */ /* * Trapped SVE access From 12f1bacfc5d9e55bedbfc7a25bf42ff6d19d1dab Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:18 +0100 Subject: [PATCH 014/145] arm64/sme: Implement sysctl to set the default vector length As for SVE provide a sysctl which allows the default SME vector length to be configured. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-11-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 5e5fbd9cba75..754a96563f6f 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -489,6 +489,30 @@ static int __init sve_sysctl_init(void) static int __init sve_sysctl_init(void) { return 0; } #endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ +#if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL) +static struct ctl_table sme_default_vl_table[] = { + { + .procname = "sme_default_vector_length", + .mode = 0644, + .proc_handler = vec_proc_do_default_vl, + .extra1 = &vl_info[ARM64_VEC_SME], + }, + { } +}; + +static int __init sme_sysctl_init(void) +{ + if (system_supports_sme()) + if (!register_sysctl("abi", sme_default_vl_table)) + return -EINVAL; + + return 0; +} + +#else /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */ +static int __init sme_sysctl_init(void) { return 0; } +#endif /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */ + #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) @@ -1687,6 +1711,9 @@ static int __init fpsimd_init(void) if (cpu_have_named_feature(SME) && !cpu_have_named_feature(SVE)) pr_notice("SME is implemented but not SVE\n"); - return sve_sysctl_init(); + sve_sysctl_init(); + sme_sysctl_init(); + + return 0; } core_initcall(fpsimd_init); From 9e4ab6c89109472082616f8d2f6ada7deaffe161 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:19 +0100 Subject: [PATCH 015/145] arm64/sme: Implement vector length configuration prctl()s As for SVE provide a prctl() interface which allows processes to configure their SME vector length. 
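For illustration only (not part of this patch), a process could drive the new interface roughly as below; the prctl() constants are those added to include/uapi/linux/prctl.h in this patch and HWCAP2_SME comes from the earlier enumeration patch:

#include <stdio.h>
#include <sys/auxv.h>
#include <sys/prctl.h>

#ifndef PR_SME_SET_VL
#define PR_SME_SET_VL		63
#define PR_SME_GET_VL		64
#define PR_SME_VL_LEN_MASK	0xffff
#endif
#ifndef HWCAP2_SME
#define HWCAP2_SME		(1 << 23)
#endif

int main(void)
{
	int vl;

	if (!(getauxval(AT_HWCAP2) & HWCAP2_SME))
		return 1;	/* SME not reported by the kernel */

	/* Ask for a 256 bit (32 byte) streaming vector length */
	if (prctl(PR_SME_SET_VL, 32) < 0) {
		perror("PR_SME_SET_VL");
		return 1;
	}

	/* The kernel picks a supported length, so read back the result */
	vl = prctl(PR_SME_GET_VL);
	if (vl < 0) {
		perror("PR_SME_GET_VL");
		return 1;
	}

	printf("SME vector length is now %d bytes\n",
	       vl & PR_SME_VL_LEN_MASK);
	return 0;
}

As with the SVE prctl()s the return value encodes both the vector length and the flag bits, hence the mask when printing.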
Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-12-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 4 ++++ arch/arm64/include/asm/processor.h | 4 +++- arch/arm64/include/asm/thread_info.h | 1 + arch/arm64/kernel/fpsimd.c | 32 ++++++++++++++++++++++++++++ include/uapi/linux/prctl.h | 9 ++++++++ kernel/sys.c | 12 +++++++++++ 6 files changed, 61 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 32cd682258d9..38fd6aab7feb 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -288,6 +288,8 @@ static inline int sme_max_virtualisable_vl(void) } extern unsigned int sme_get_vl(void); +extern int sme_set_current_vl(unsigned long arg); +extern int sme_get_current_vl(void); #else @@ -299,6 +301,8 @@ static inline void sme_setup(void) { } static inline unsigned int sme_get_vl(void) { return 0; } static inline int sme_max_vl(void) { return 0; } static inline int sme_max_virtualisable_vl(void) { return 0; } +static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; } +static inline int sme_get_current_vl(void) { return -EINVAL; } #endif /* ! CONFIG_ARM64_SME */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index abf34a9c2eab..7a57cbff8a03 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -355,9 +355,11 @@ extern void __init minsigstksz_setup(void); */ #include -/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */ +/* Userspace interface for PR_S[MV]E_{SET,GET}_VL prctl()s: */ #define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_GET_VL() sve_get_current_vl() +#define SME_SET_VL(arg) sme_set_current_vl(arg) +#define SME_GET_VL() sme_get_current_vl() /* PR_PAC_RESET_KEYS prctl */ #define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index e1317b7c4525..4e6b58dcd6f9 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -82,6 +82,7 @@ int arch_dup_task_struct(struct task_struct *dst, #define TIF_SVE_VL_INHERIT 24 /* Inherit SVE vl_onexec across exec */ #define TIF_SSBD 25 /* Wants SSB mitigation */ #define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ +#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 754a96563f6f..39f44fcb9b99 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -149,6 +149,8 @@ static unsigned int vec_vl_inherit_flag(enum vec_type type) switch (type) { case ARM64_VEC_SVE: return TIF_SVE_VL_INHERIT; + case ARM64_VEC_SME: + return TIF_SME_VL_INHERIT; default: WARN_ON_ONCE(1); return 0; @@ -807,6 +809,36 @@ int sve_get_current_vl(void) return vec_prctl_status(ARM64_VEC_SVE, 0); } +#ifdef CONFIG_ARM64_SME +/* PR_SME_SET_VL */ +int sme_set_current_vl(unsigned long arg) +{ + unsigned long vl, flags; + int ret; + + vl = arg & PR_SME_VL_LEN_MASK; + flags = arg & ~vl; + + if (!system_supports_sme() || is_compat_task()) + return -EINVAL; + + ret = vec_set_vector_length(current, ARM64_VEC_SME, vl, flags); + if (ret) + return ret; + + return vec_prctl_status(ARM64_VEC_SME, flags); +} + +/* PR_SME_GET_VL */ +int sme_get_current_vl(void) +{ + 
if (!system_supports_sme() || is_compat_task()) + return -EINVAL; + + return vec_prctl_status(ARM64_VEC_SME, 0); +} +#endif /* CONFIG_ARM64_SME */ + static void vec_probe_vqs(struct vl_info *info, DECLARE_BITMAP(map, SVE_VQ_MAX)) { diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index e998764f0262..a5e06dcbba13 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -272,6 +272,15 @@ struct prctl_mm_map { # define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1 # define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2 +/* arm64 Scalable Matrix Extension controls */ +/* Flag values must be in sync with SVE versions */ +#define PR_SME_SET_VL 63 /* set task vector length */ +# define PR_SME_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */ +#define PR_SME_GET_VL 64 /* get task vector length */ +/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */ +# define PR_SME_VL_LEN_MASK 0xffff +# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */ + #define PR_SET_VMA 0x53564d41 # define PR_SET_VMA_ANON_NAME 0 diff --git a/kernel/sys.c b/kernel/sys.c index 374f83e95239..b911fa6d81ab 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -117,6 +117,12 @@ #ifndef SVE_GET_VL # define SVE_GET_VL() (-EINVAL) #endif +#ifndef SME_SET_VL +# define SME_SET_VL(a) (-EINVAL) +#endif +#ifndef SME_GET_VL +# define SME_GET_VL() (-EINVAL) +#endif #ifndef PAC_RESET_KEYS # define PAC_RESET_KEYS(a, b) (-EINVAL) #endif @@ -2541,6 +2547,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, case PR_SVE_GET_VL: error = SVE_GET_VL(); break; + case PR_SME_SET_VL: + error = SME_SET_VL(arg2); + break; + case PR_SME_GET_VL: + error = SME_GET_VL(); + break; case PR_GET_SPECULATION_CTRL: if (arg3 || arg4 || arg5) return -EINVAL; From a9d69158595017d260ab37bf88b8f125e5e8144c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:20 +0100 Subject: [PATCH 016/145] arm64/sme: Implement support for TPIDR2 The Scalable Matrix Extension introduces support for a new thread specific data register TPIDR2 intended for use by libc. The kernel must save the value of TPIDR2 on context switch and should ensure that all new threads start off with a default value of 0. Add a field to the thread_struct to store TPIDR2 and context switch it with the other thread specific data. In case there are future extensions which also use TPIDR2 we introduce system_supports_tpidr2() and use that rather than system_supports_sme() for TPIDR2 handling. 
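Purely as an illustration (not part of this patch), once EL0 access is enabled a C library could treat TPIDR2 as an extra per-thread pointer along these lines. This sketch assumes a toolchain new enough to accept the tpidr2_el0 register name; older assemblers would need the raw system register encoding instead:

/* Hypothetical libc-style accessors for the EL0-visible TPIDR2 register. */
static inline void my_tpidr2_set(void *ptr)
{
	asm volatile("msr tpidr2_el0, %0" : : "r" (ptr));
}

static inline void *my_tpidr2_get(void)
{
	void *ptr;

	asm volatile("mrs %0, tpidr2_el0" : "=r" (ptr));
	return ptr;
}

Whatever is stored there is then preserved across context switch and zeroed for new threads by the kernel.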
Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-13-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 5 +++++ arch/arm64/include/asm/processor.h | 1 + arch/arm64/kernel/fpsimd.c | 4 ++++ arch/arm64/kernel/process.c | 14 ++++++++++++-- 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 5ddfae233ea5..14a8f3d93add 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -778,6 +778,11 @@ static __always_inline bool system_supports_fa64(void) cpus_have_const_cap(ARM64_SME_FA64); } +static __always_inline bool system_supports_tpidr2(void) +{ + return system_supports_sme(); +} + static __always_inline bool system_supports_cnp(void) { return IS_ENABLED(CONFIG_ARM64_CNP) && diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 7a57cbff8a03..849e97d418a8 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -169,6 +169,7 @@ struct thread_struct { u64 mte_ctrl; #endif u64 sctlr_user; + u64 tpidr2_el0; }; static inline unsigned int thread_get_vl(struct thread_struct *thread, diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 39f44fcb9b99..231f2d85b65e 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1098,6 +1098,10 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) /* Allow SME in kernel */ write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1); isb(); + + /* Allow EL0 to access TPIDR2 */ + write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1); + isb(); } /* diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 7fa97df55e3a..e20571f19718 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -250,6 +250,8 @@ void show_regs(struct pt_regs *regs) static void tls_thread_flush(void) { write_sysreg(0, tpidr_el0); + if (system_supports_tpidr2()) + write_sysreg_s(0, SYS_TPIDR2_EL0); if (is_compat_task()) { current->thread.uw.tp_value = 0; @@ -343,6 +345,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, * out-of-sync with the saved value. */ *task_user_tls(p) = read_sysreg(tpidr_el0); + if (system_supports_tpidr2()) + p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); if (stack_start) { if (is_compat_thread(task_thread_info(p))) @@ -353,10 +357,12 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, /* * If a TLS pointer was passed to clone, use it for the new - * thread. + * thread. We also reset TPIDR2 if it's in use. 
*/ - if (clone_flags & CLONE_SETTLS) + if (clone_flags & CLONE_SETTLS) { p->thread.uw.tp_value = tls; + p->thread.tpidr2_el0 = 0; + } } else { /* * A kthread has no context to ERET to, so ensure any buggy @@ -387,6 +393,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, void tls_preserve_current_state(void) { *task_user_tls(current) = read_sysreg(tpidr_el0); + if (system_supports_tpidr2() && !is_compat_task()) + current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); } static void tls_thread_switch(struct task_struct *next) @@ -399,6 +407,8 @@ static void tls_thread_switch(struct task_struct *next) write_sysreg(0, tpidrro_el0); write_sysreg(*task_user_tls(next), tpidr_el0); + if (system_supports_tpidr2()) + write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0); } /* From b40c559b45bec736f588c57dd5be967fe573058b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:21 +0100 Subject: [PATCH 017/145] arm64/sme: Implement SVCR context switching In SME the use of both streaming SVE mode and ZA are tracked through PSTATE.SM and PSTATE.ZA, visible through the system register SVCR. In order to context switch the floating point state for SME we need to context switch the contents of this register as part of context switching the floating point state. Since changing the vector length exits streaming SVE mode and disables ZA we also make sure we update SVCR appropriately when setting vector length, and similarly ensure that new threads have streaming SVE mode and ZA disabled. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-14-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 3 ++- arch/arm64/include/asm/processor.h | 1 + arch/arm64/include/asm/thread_info.h | 1 + arch/arm64/kernel/fpsimd.c | 18 +++++++++++++++++- arch/arm64/kernel/process.c | 2 ++ arch/arm64/kvm/fpsimd.c | 7 ++++++- 6 files changed, 29 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 38fd6aab7feb..821d270980da 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -46,7 +46,8 @@ extern void fpsimd_restore_current_state(void); extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state, - void *sve_state, unsigned int sve_vl); + void *sve_state, unsigned int sve_vl, + u64 *svcr); extern void fpsimd_flush_task_state(struct task_struct *target); extern void fpsimd_save_and_flush_cpu_state(void); diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 849e97d418a8..22cd11e86854 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -169,6 +169,7 @@ struct thread_struct { u64 mte_ctrl; #endif u64 sctlr_user; + u64 svcr; u64 tpidr2_el0; }; diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 4e6b58dcd6f9..848739c15de8 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -82,6 +82,7 @@ int arch_dup_task_struct(struct task_struct *dst, #define TIF_SVE_VL_INHERIT 24 /* Inherit SVE vl_onexec across exec */ #define TIF_SSBD 25 /* Wants SSB mitigation */ #define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ +#define TIF_SME 27 /* SME in use */ #define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 231f2d85b65e..1c113349f6cc 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -121,6 +121,7 @@ struct fpsimd_last_state_struct { struct user_fpsimd_state *st; void *sve_state; + u64 *svcr; unsigned int sve_vl; }; @@ -359,6 +360,9 @@ static void task_fpsimd_load(void) WARN_ON(!system_supports_fpsimd()); WARN_ON(!have_cpu_fpsimd_context()); + if (IS_ENABLED(CONFIG_ARM64_SME) && test_thread_flag(TIF_SME)) + write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0); + if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) { sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1); sve_load_state(sve_pffr(¤t->thread), @@ -390,6 +394,12 @@ static void fpsimd_save(void) if (test_thread_flag(TIF_FOREIGN_FPSTATE)) return; + if (IS_ENABLED(CONFIG_ARM64_SME) && + test_thread_flag(TIF_SME)) { + u64 *svcr = last->svcr; + *svcr = read_sysreg_s(SYS_SVCR_EL0); + } + if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) { if (WARN_ON(sve_get_vl() != last->sve_vl)) { @@ -741,6 +751,10 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type, if (test_and_clear_tsk_thread_flag(task, TIF_SVE)) sve_to_fpsimd(task); + if (system_supports_sme() && type == ARM64_VEC_SME) + task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK | + SYS_SVCR_EL0_ZA_MASK); + if (task == current) put_cpu_fpsimd_context(); @@ -1404,6 +1418,7 @@ static void fpsimd_bind_task_to_cpu(void) last->st = ¤t->thread.uw.fpsimd_state; last->sve_state = current->thread.sve_state; last->sve_vl = task_get_sve_vl(current); + last->svcr = ¤t->thread.svcr; current->thread.fpsimd_cpu = smp_processor_id(); if (system_supports_sve()) { @@ -1418,7 +1433,7 @@ static void fpsimd_bind_task_to_cpu(void) } void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, - unsigned int sve_vl) + unsigned int sve_vl, u64 *svcr) { struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); @@ -1427,6 +1442,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, WARN_ON(!in_softirq() && !irqs_disabled()); last->st = st; + last->svcr = svcr; last->sve_state = sve_state; last->sve_vl = sve_vl; } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index e20571f19718..07f235b46cf5 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -310,6 +310,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) dst->thread.sve_state = NULL; clear_tsk_thread_flag(dst, TIF_SVE); + dst->thread.svcr = 0; + /* clear any pending asynchronous tag fault raised by the parent */ clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT); diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 397fdac75cb1..ac09f1f682ff 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -109,9 +109,14 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) WARN_ON_ONCE(!irqs_disabled()); if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { + /* + * Currently we do not support SME guests so SVCR is + * always 0 and we just need a variable to point to. 
+ */ fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs, vcpu->arch.sve_state, - vcpu->arch.sve_max_vl); + vcpu->arch.sve_max_vl, + NULL); clear_thread_flag(TIF_FOREIGN_FPSTATE); update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu)); From af7167d6d2675f3343eff3ad6c9b4a8e30122e2c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:22 +0100 Subject: [PATCH 018/145] arm64/sme: Implement streaming SVE context switching When in streaming mode we need to save and restore the streaming mode SVE register state rather than the regular SVE register state. This uses the streaming mode vector length and omits FFR but is otherwise identical, if TIF_SVE is enabled when we are in streaming mode then streaming mode takes precedence. This does not handle use of streaming SVE state with KVM, ptrace or signals. This will be updated in further patches. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-15-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 22 ++++- arch/arm64/include/asm/fpsimdmacros.h | 11 +++ arch/arm64/include/asm/processor.h | 10 +++ arch/arm64/kernel/entry-fpsimd.S | 5 ++ arch/arm64/kernel/fpsimd.c | 115 ++++++++++++++++++++------ arch/arm64/kvm/fpsimd.c | 2 +- 6 files changed, 139 insertions(+), 26 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 821d270980da..cd94f5c5b516 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -47,11 +47,21 @@ extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state, void *sve_state, unsigned int sve_vl, - u64 *svcr); + unsigned int sme_vl, u64 *svcr); extern void fpsimd_flush_task_state(struct task_struct *target); extern void fpsimd_save_and_flush_cpu_state(void); +static inline bool thread_sm_enabled(struct thread_struct *thread) +{ + return system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_SM_MASK); +} + +static inline bool thread_za_enabled(struct thread_struct *thread) +{ + return system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_ZA_MASK); +} + /* Maximum VL that SVE/SME VL-agnostic software can transparently support */ #define VL_ARCH_MAX 0x100 @@ -63,7 +73,14 @@ static inline size_t sve_ffr_offset(int vl) static inline void *sve_pffr(struct thread_struct *thread) { - return (char *)thread->sve_state + sve_ffr_offset(thread_get_sve_vl(thread)); + unsigned int vl; + + if (system_supports_sme() && thread_sm_enabled(thread)) + vl = thread_get_sme_vl(thread); + else + vl = thread_get_sve_vl(thread); + + return (char *)thread->sve_state + sve_ffr_offset(vl); } extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr); @@ -72,6 +89,7 @@ extern void sve_load_state(void const *state, u32 const *pfpsr, extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); extern void sve_set_vq(unsigned long vq_minus_1); +extern void sme_set_vq(unsigned long vq_minus_1); struct arm64_cpu_capabilities; extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index 2e9a33155081..f6ab36e0cd8d 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -262,6 +262,17 @@ 921: .endm +/* Update SMCR_EL1.LEN with the new VQ */ +.macro sme_load_vq xvqminus1, xtmp, xtmp2 + mrs_s \xtmp, 
SYS_SMCR_EL1 + bic \xtmp2, \xtmp, SMCR_ELx_LEN_MASK + orr \xtmp2, \xtmp2, \xvqminus1 + cmp \xtmp2, \xtmp + b.eq 921f + msr_s SYS_SMCR_EL1, \xtmp2 //self-synchronising +921: +.endm + /* Preserve the first 128-bits of Znz and zero the rest. */ .macro _sve_flush_z nz _sve_check_zreg \nz diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 22cd11e86854..7542310b4e6b 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -184,6 +184,11 @@ static inline unsigned int thread_get_sve_vl(struct thread_struct *thread) return thread_get_vl(thread, ARM64_VEC_SVE); } +static inline unsigned int thread_get_sme_vl(struct thread_struct *thread) +{ + return thread_get_vl(thread, ARM64_VEC_SME); +} + unsigned int task_get_vl(const struct task_struct *task, enum vec_type type); void task_set_vl(struct task_struct *task, enum vec_type type, unsigned long vl); @@ -197,6 +202,11 @@ static inline unsigned int task_get_sve_vl(const struct task_struct *task) return task_get_vl(task, ARM64_VEC_SVE); } +static inline unsigned int task_get_sme_vl(const struct task_struct *task) +{ + return task_get_vl(task, ARM64_VEC_SME); +} + static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl) { task_set_vl(task, ARM64_VEC_SVE, vl); diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index deee5f01462e..6f88c0f86d50 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -94,4 +94,9 @@ SYM_FUNC_START(sme_get_vl) ret SYM_FUNC_END(sme_get_vl) +SYM_FUNC_START(sme_set_vq) + sme_load_vq x0, x1, x2 + ret +SYM_FUNC_END(sme_set_vq) + #endif /* CONFIG_ARM64_SME */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 1c113349f6cc..f8506a875eb2 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -123,6 +123,7 @@ struct fpsimd_last_state_struct { void *sve_state; u64 *svcr; unsigned int sve_vl; + unsigned int sme_vl; }; static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state); @@ -301,17 +302,28 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, task->thread.vl_onexec[type] = vl; } +/* + * TIF_SME controls whether a task can use SME without trapping while + * in userspace, when TIF_SME is set then we must have storage + * alocated in sve_state and za_state to store the contents of both ZA + * and the SVE registers for both streaming and non-streaming modes. + * + * If both SVCR.ZA and SVCR.SM are disabled then at any point we + * may disable TIF_SME and reenable traps. + */ + + /* * TIF_SVE controls whether a task can use SVE without trapping while - * in userspace, and also the way a task's FPSIMD/SVE state is stored - * in thread_struct. + * in userspace, and also (together with TIF_SME) the way a task's + * FPSIMD/SVE state is stored in thread_struct. * * The kernel uses this flag to track whether a user task is actively * using SVE, and therefore whether full SVE register state needs to * be tracked. If not, the cheaper FPSIMD context handling code can * be used instead of the more costly SVE equivalents. * - * * TIF_SVE set: + * * TIF_SVE or SVCR.SM set: * * The task can execute SVE instructions while in userspace without * trapping to the kernel. 
@@ -319,7 +331,8 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the * corresponding Zn), P0-P15 and FFR are encoded in in * task->thread.sve_state, formatted appropriately for vector - * length task->thread.sve_vl. + * length task->thread.sve_vl or, if SVCR.SM is set, + * task->thread.sme_vl. * * task->thread.sve_state must point to a valid buffer at least * sve_state_size(task) bytes in size. @@ -357,19 +370,40 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, */ static void task_fpsimd_load(void) { + bool restore_sve_regs = false; + bool restore_ffr; + WARN_ON(!system_supports_fpsimd()); WARN_ON(!have_cpu_fpsimd_context()); - if (IS_ENABLED(CONFIG_ARM64_SME) && test_thread_flag(TIF_SME)) - write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0); - + /* Check if we should restore SVE first */ if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) { sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1); - sve_load_state(sve_pffr(¤t->thread), - ¤t->thread.uw.fpsimd_state.fpsr, true); - } else { - fpsimd_load_state(¤t->thread.uw.fpsimd_state); + restore_sve_regs = true; + restore_ffr = true; } + + /* Restore SME, override SVE register configuration if needed */ + if (system_supports_sme()) { + unsigned long sme_vl = task_get_sme_vl(current); + + if (test_thread_flag(TIF_SME)) + sme_set_vq(sve_vq_from_vl(sme_vl) - 1); + + write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0); + + if (thread_sm_enabled(¤t->thread)) { + restore_sve_regs = true; + restore_ffr = system_supports_fa64(); + } + } + + if (restore_sve_regs) + sve_load_state(sve_pffr(¤t->thread), + ¤t->thread.uw.fpsimd_state.fpsr, + restore_ffr); + else + fpsimd_load_state(¤t->thread.uw.fpsimd_state); } /* @@ -387,6 +421,9 @@ static void fpsimd_save(void) struct fpsimd_last_state_struct const *last = this_cpu_ptr(&fpsimd_last_state); /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ + bool save_sve_regs = false; + bool save_ffr; + unsigned int vl; WARN_ON(!system_supports_fpsimd()); WARN_ON(!have_cpu_fpsimd_context()); @@ -394,15 +431,33 @@ static void fpsimd_save(void) if (test_thread_flag(TIF_FOREIGN_FPSTATE)) return; - if (IS_ENABLED(CONFIG_ARM64_SME) && - test_thread_flag(TIF_SME)) { - u64 *svcr = last->svcr; - *svcr = read_sysreg_s(SYS_SVCR_EL0); + if (test_thread_flag(TIF_SVE)) { + save_sve_regs = true; + save_ffr = true; + vl = last->sve_vl; } - if (IS_ENABLED(CONFIG_ARM64_SVE) && - test_thread_flag(TIF_SVE)) { - if (WARN_ON(sve_get_vl() != last->sve_vl)) { + if (system_supports_sme()) { + u64 *svcr = last->svcr; + *svcr = read_sysreg_s(SYS_SVCR_EL0); + + if (thread_za_enabled(¤t->thread)) { + /* ZA state managment is not implemented yet */ + force_signal_inject(SIGKILL, SI_KERNEL, 0, 0); + return; + } + + /* If we are in streaming mode override regular SVE. */ + if (*svcr & SYS_SVCR_EL0_SM_MASK) { + save_sve_regs = true; + save_ffr = system_supports_fa64(); + vl = last->sme_vl; + } + } + + if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) { + /* Get the configured VL from RDVL, will account for SM */ + if (WARN_ON(sve_get_vl() != vl)) { /* * Can't save the user regs, so current would * re-enter user with corrupt state. 
@@ -413,8 +468,8 @@ static void fpsimd_save(void) } sve_save_state((char *)last->sve_state + - sve_ffr_offset(last->sve_vl), - &last->st->fpsr, true); + sve_ffr_offset(vl), + &last->st->fpsr, save_ffr); } else { fpsimd_save_state(last->st); } @@ -619,7 +674,14 @@ static void sve_to_fpsimd(struct task_struct *task) */ static size_t sve_state_size(struct task_struct const *task) { - return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task_get_sve_vl(task))); + unsigned int vl = 0; + + if (system_supports_sve()) + vl = task_get_sve_vl(task); + if (system_supports_sme()) + vl = max(vl, task_get_sme_vl(task)); + + return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)); } /* @@ -748,7 +810,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type, } fpsimd_flush_task_state(task); - if (test_and_clear_tsk_thread_flag(task, TIF_SVE)) + if (test_and_clear_tsk_thread_flag(task, TIF_SVE) || + thread_sm_enabled(&task->thread)) sve_to_fpsimd(task); if (system_supports_sme() && type == ARM64_VEC_SME) @@ -1375,6 +1438,9 @@ void fpsimd_flush_thread(void) fpsimd_flush_thread_vl(ARM64_VEC_SVE); } + if (system_supports_sme()) + fpsimd_flush_thread_vl(ARM64_VEC_SME); + put_cpu_fpsimd_context(); } @@ -1418,6 +1484,7 @@ static void fpsimd_bind_task_to_cpu(void) last->st = ¤t->thread.uw.fpsimd_state; last->sve_state = current->thread.sve_state; last->sve_vl = task_get_sve_vl(current); + last->sme_vl = task_get_sme_vl(current); last->svcr = ¤t->thread.svcr; current->thread.fpsimd_cpu = smp_processor_id(); @@ -1433,7 +1500,8 @@ static void fpsimd_bind_task_to_cpu(void) } void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, - unsigned int sve_vl, u64 *svcr) + unsigned int sve_vl, unsigned int sme_vl, + u64 *svcr) { struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); @@ -1445,6 +1513,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, last->svcr = svcr; last->sve_state = sve_state; last->sve_vl = sve_vl; + last->sme_vl = sme_vl; } /* diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index ac09f1f682ff..394e583bb73e 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -116,7 +116,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs, vcpu->arch.sve_state, vcpu->arch.sve_max_vl, - NULL); + 0, NULL); clear_thread_flag(TIF_FOREIGN_FPSTATE); update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu)); From 0033cd9339642f9b7bef23f96aa2e7277ab51cce Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:23 +0100 Subject: [PATCH 019/145] arm64/sme: Implement ZA context switching Allocate space for storing ZA on first access to SME and use that to save and restore ZA state when context switching. We do this by using the vector form of the LDR and STR ZA instructions, these do not require streaming mode and have implementation recommendations that they avoid contention issues in shared SMCU implementations. Since ZA is architecturally guaranteed to be zeroed when enabled we do not need to explicitly zero ZA, either we will be restoring from a saved copy or trapping on first use of SME so we know that ZA must be disabled. 
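As a rough illustration (not part of this patch), ZA is an SVL/8 by SVL/8 byte array, so the storage needed grows with the square of the streaming vector length: 1KB at a 256 bit SVL, 64KB at the architectural maximum of 2048 bits. A hypothetical sizing helper, using the task_get_sme_vl() accessor added earlier in the series, might look like:

static size_t za_state_size_sketch(struct task_struct const *task)
{
	/* vl is the SME vector length in bytes; ZA is vl rows of vl bytes */
	unsigned int vl = task_get_sme_vl(task);

	return (size_t)vl * vl;
}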
Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-16-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 5 ++++- arch/arm64/include/asm/fpsimdmacros.h | 22 ++++++++++++++++++++++ arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/include/asm/processor.h | 1 + arch/arm64/kernel/entry-fpsimd.S | 22 ++++++++++++++++++++++ arch/arm64/kernel/fpsimd.c | 20 +++++++++++++------- arch/arm64/kvm/fpsimd.c | 2 +- 7 files changed, 66 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index cd94f5c5b516..1a709c03bb6c 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -47,7 +47,8 @@ extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state, void *sve_state, unsigned int sve_vl, - unsigned int sme_vl, u64 *svcr); + void *za_state, unsigned int sme_vl, + u64 *svcr); extern void fpsimd_flush_task_state(struct task_struct *target); extern void fpsimd_save_and_flush_cpu_state(void); @@ -90,6 +91,8 @@ extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); extern void sve_set_vq(unsigned long vq_minus_1); extern void sme_set_vq(unsigned long vq_minus_1); +extern void za_save_state(void *state); +extern void za_load_state(void const *state); struct arm64_cpu_capabilities; extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index f6ab36e0cd8d..5e0910cf4832 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -319,3 +319,25 @@ ldr w\nxtmp, [\xpfpsr, #4] msr fpcr, x\nxtmp .endm + +.macro sme_save_za nxbase, xvl, nw + mov w\nw, #0 + +423: + _sme_str_zav \nw, \nxbase + add x\nxbase, x\nxbase, \xvl + add x\nw, x\nw, #1 + cmp \xvl, x\nw + bne 423b +.endm + +.macro sme_load_za nxbase, xvl, nw + mov w\nw, #0 + +423: + _sme_ldr_zav \nw, \nxbase + add x\nxbase, x\nxbase, \xvl + add x\nw, x\nw, #1 + cmp \xvl, x\nw + bne 423b +.endm diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 94a27a7520f4..8a7c442d5b57 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -295,8 +295,11 @@ struct vcpu_reset_state { struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; + + /* Guest floating point state */ void *sve_state; unsigned int sve_max_vl; + u64 svcr; /* Stage 2 paging state used by the hardware on next switch */ struct kvm_s2_mmu *hw_mmu; diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 7542310b4e6b..6a3a6c3dec90 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -154,6 +154,7 @@ struct thread_struct { unsigned int fpsimd_cpu; void *sve_state; /* SVE registers, if any */ + void *za_state; /* ZA register, if any */ unsigned int vl[ARM64_VEC_MAX]; /* vector length */ unsigned int vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */ unsigned long fault_address; /* fault info */ diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index 6f88c0f86d50..229436f33df5 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -99,4 +99,26 @@ SYM_FUNC_START(sme_set_vq) ret SYM_FUNC_END(sme_set_vq) +/* + * Save the SME state + * + * x0 - pointer 
to buffer for state + */ +SYM_FUNC_START(za_save_state) + _sme_rdsvl 1, 1 // x1 = VL/8 + sme_save_za 0, x1, 12 + ret +SYM_FUNC_END(za_save_state) + +/* + * Load the SME state + * + * x0 - pointer to buffer for state + */ +SYM_FUNC_START(za_load_state) + _sme_rdsvl 1, 1 // x1 = VL/8 + sme_load_za 0, x1, 12 + ret +SYM_FUNC_END(za_load_state) + #endif /* CONFIG_ARM64_SME */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index f8506a875eb2..dc38f3f2a28a 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -121,6 +121,7 @@ struct fpsimd_last_state_struct { struct user_fpsimd_state *st; void *sve_state; + void *za_state; u64 *svcr; unsigned int sve_vl; unsigned int sme_vl; @@ -387,11 +388,15 @@ static void task_fpsimd_load(void) if (system_supports_sme()) { unsigned long sme_vl = task_get_sme_vl(current); + /* Ensure VL is set up for restoring data */ if (test_thread_flag(TIF_SME)) sme_set_vq(sve_vq_from_vl(sme_vl) - 1); write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0); + if (thread_za_enabled(¤t->thread)) + za_load_state(current->thread.za_state); + if (thread_sm_enabled(¤t->thread)) { restore_sve_regs = true; restore_ffr = system_supports_fa64(); @@ -441,11 +446,10 @@ static void fpsimd_save(void) u64 *svcr = last->svcr; *svcr = read_sysreg_s(SYS_SVCR_EL0); - if (thread_za_enabled(¤t->thread)) { - /* ZA state managment is not implemented yet */ - force_signal_inject(SIGKILL, SI_KERNEL, 0, 0); - return; - } + *svcr = read_sysreg_s(SYS_SVCR_EL0); + + if (*svcr & SYS_SVCR_EL0_ZA_MASK) + za_save_state(last->za_state); /* If we are in streaming mode override regular SVE. */ if (*svcr & SYS_SVCR_EL0_SM_MASK) { @@ -1483,6 +1487,7 @@ static void fpsimd_bind_task_to_cpu(void) WARN_ON(!system_supports_fpsimd()); last->st = ¤t->thread.uw.fpsimd_state; last->sve_state = current->thread.sve_state; + last->za_state = current->thread.za_state; last->sve_vl = task_get_sve_vl(current); last->sme_vl = task_get_sme_vl(current); last->svcr = ¤t->thread.svcr; @@ -1500,8 +1505,8 @@ static void fpsimd_bind_task_to_cpu(void) } void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, - unsigned int sve_vl, unsigned int sme_vl, - u64 *svcr) + unsigned int sve_vl, void *za_state, + unsigned int sme_vl, u64 *svcr) { struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); @@ -1512,6 +1517,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, last->st = st; last->svcr = svcr; last->sve_state = sve_state; + last->za_state = za_state; last->sve_vl = sve_vl; last->sme_vl = sme_vl; } diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 394e583bb73e..57d7ac3cfa0c 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -116,7 +116,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs, vcpu->arch.sve_state, vcpu->arch.sve_max_vl, - 0, NULL); + NULL, 0, &vcpu->arch.svcr); clear_thread_flag(TIF_FOREIGN_FPSTATE); update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu)); From 8bd7f91c03d886f41d35f6108078d20be5a4a1bd Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:24 +0100 Subject: [PATCH 020/145] arm64/sme: Implement traps and syscall handling for SME By default all SME operations in userspace will trap. When this happens we allocate storage space for the SME register state, set up the SVE registers and disable traps. 
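To give a sense of the storage this trap handling has to allocate, the arithmetic below mirrors the sizing used by za_state_size()/ZA_SIG_REGS_SIZE() added in this patch: the ZA buffer is (SVL/8) squared bytes, so it grows quadratically with the streaming vector length and reaches 64K at the architectural maximum of 2048 bits, which is why the allocation is deferred until a task first traps on SME. This is illustrative userspace arithmetic, not kernel code.

  #include <stdio.h>

  /* (SVL/8)^2 bytes, matching ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl)). */
  static unsigned int za_buffer_bytes(unsigned int svl_bits)
  {
      unsigned int vl_bytes = svl_bits / 8;

      return vl_bytes * vl_bytes;
  }

  int main(void)
  {
      unsigned int svl;

      for (svl = 128; svl <= 2048; svl *= 2)
          printf("SVL %4u bits -> ZA buffer %6u bytes\n",
                 svl, za_buffer_bytes(svl));
      return 0;
  }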
We do not need to initialize ZA since the architecture guarantees that it will be zeroed when enabled and when we trap ZA is disabled. On syscall we exit streaming mode if we were previously in it and ensure that all but the lower 128 bits of the registers are zeroed while preserving the state of ZA. This follows the aarch64 PCS for SME, ZA state is preserved over a function call and streaming mode is exited. Since the traps for SME do not distinguish between streaming mode SVE and ZA usage if ZA is in use rather than reenabling traps we instead zero the parts of the SVE registers not shared with FPSIMD and leave SME enabled, this simplifies handling SME traps. If ZA is not in use then we reenable SME traps and fall through to normal handling of SVE. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-17-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/esr.h | 1 + arch/arm64/include/asm/exception.h | 1 + arch/arm64/include/asm/fpsimd.h | 39 +++++++ arch/arm64/kernel/entry-common.c | 11 ++ arch/arm64/kernel/fpsimd.c | 167 +++++++++++++++++++++++++---- arch/arm64/kernel/process.c | 30 +++++- arch/arm64/kernel/syscall.c | 29 ++++- 7 files changed, 255 insertions(+), 23 deletions(-) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 43872e0cfd1e..0467837fd66b 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -76,6 +76,7 @@ #define ESR_ELx_IL_SHIFT (25) #define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT) #define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1) +#define ESR_ELx_ISS(esr) ((esr) & ESR_ELx_ISS_MASK) /* ISS field definitions shared by different classes */ #define ESR_ELx_WNR_SHIFT (6) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 339477dca551..2add7f33b7c2 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -64,6 +64,7 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, struct pt_regs *regs); void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs); void do_sve_acc(unsigned int esr, struct pt_regs *regs); +void do_sme_acc(unsigned int esr, struct pt_regs *regs); void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs); void do_sysinstr(unsigned int esr, struct pt_regs *regs); void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 1a709c03bb6c..6c33bc832ed4 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -239,6 +239,8 @@ static inline bool sve_vq_available(unsigned int vq) return vq_available(ARM64_VEC_SVE, vq); } +size_t sve_state_size(struct task_struct const *task); + #else /* ! CONFIG_ARM64_SVE */ static inline void sve_alloc(struct task_struct *task) { } @@ -278,10 +280,25 @@ static inline void vec_update_vq_map(enum vec_type t) { } static inline int vec_verify_vq_map(enum vec_type t) { return 0; } static inline void sve_setup(void) { } +static inline size_t sve_state_size(struct task_struct const *task) +{ + return 0; +} + #endif /* ! 
CONFIG_ARM64_SVE */ #ifdef CONFIG_ARM64_SME +static inline void sme_user_disable(void) +{ + sysreg_clear_set(cpacr_el1, CPACR_EL1_SMEN_EL0EN, 0); +} + +static inline void sme_user_enable(void) +{ + sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_SMEN_EL0EN); +} + static inline void sme_smstart_sm(void) { asm volatile(__msr_s(SYS_SVCR_SMSTART_SM_EL0, "xzr")); @@ -309,16 +326,33 @@ static inline int sme_max_virtualisable_vl(void) return vec_max_virtualisable_vl(ARM64_VEC_SME); } +extern void sme_alloc(struct task_struct *task); extern unsigned int sme_get_vl(void); extern int sme_set_current_vl(unsigned long arg); extern int sme_get_current_vl(void); +/* + * Return how many bytes of memory are required to store the full SME + * specific state (currently just ZA) for task, given task's currently + * configured vector length. + */ +static inline size_t za_state_size(struct task_struct const *task) +{ + unsigned int vl = task_get_sme_vl(task); + + return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl)); +} + #else +static inline void sme_user_disable(void) { BUILD_BUG(); } +static inline void sme_user_enable(void) { BUILD_BUG(); } + static inline void sme_smstart_sm(void) { } static inline void sme_smstop_sm(void) { } static inline void sme_smstop(void) { } +static inline void sme_alloc(struct task_struct *task) { } static inline void sme_setup(void) { } static inline unsigned int sme_get_vl(void) { return 0; } static inline int sme_max_vl(void) { return 0; } @@ -326,6 +360,11 @@ static inline int sme_max_virtualisable_vl(void) { return 0; } static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; } static inline int sme_get_current_vl(void) { return -EINVAL; } +static inline size_t za_state_size(struct task_struct const *task) +{ + return 0; +} + #endif /* ! CONFIG_ARM64_SME */ /* For use by EFI runtime services calls only */ diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 878c65aa7206..29139e9a1517 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -537,6 +537,14 @@ static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr) exit_to_user_mode(regs); } +static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr) +{ + enter_from_user_mode(regs); + local_daif_restore(DAIF_PROCCTX); + do_sme_acc(esr, regs); + exit_to_user_mode(regs); +} + static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr) { enter_from_user_mode(regs); @@ -645,6 +653,9 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs) case ESR_ELx_EC_SVE: el0_sve_acc(regs, esr); break; + case ESR_ELx_EC_SME: + el0_sme_acc(regs, esr); + break; case ESR_ELx_EC_FP_EXC64: el0_fpsimd_exc(regs, esr); break; diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index dc38f3f2a28a..00a0cbd01ce5 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -209,6 +209,12 @@ static void set_sme_default_vl(int val) set_default_vl(ARM64_VEC_SME, val); } +static void sme_free(struct task_struct *); + +#else + +static inline void sme_free(struct task_struct *t) { } + #endif DEFINE_PER_CPU(bool, fpsimd_context_busy); @@ -676,7 +682,7 @@ static void sve_to_fpsimd(struct task_struct *task) * Return how many bytes of memory are required to store the full SVE * state for task, given task's currently configured vector length. 
*/ -static size_t sve_state_size(struct task_struct const *task) +size_t sve_state_size(struct task_struct const *task) { unsigned int vl = 0; @@ -818,18 +824,22 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type, thread_sm_enabled(&task->thread)) sve_to_fpsimd(task); - if (system_supports_sme() && type == ARM64_VEC_SME) + if (system_supports_sme() && type == ARM64_VEC_SME) { task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK | SYS_SVCR_EL0_ZA_MASK); + clear_thread_flag(TIF_SME); + } if (task == current) put_cpu_fpsimd_context(); /* - * Force reallocation of task SVE state to the correct size - * on next use: + * Force reallocation of task SVE and SME state to the correct + * size on next use: */ sve_free(task); + if (system_supports_sme() && type == ARM64_VEC_SME) + sme_free(task); task_set_vl(task, type, vl); @@ -1164,12 +1174,43 @@ void __init sve_setup(void) void fpsimd_release_task(struct task_struct *dead_task) { __sve_free(dead_task); + sme_free(dead_task); } #endif /* CONFIG_ARM64_SVE */ #ifdef CONFIG_ARM64_SME +/* This will move to uapi/asm/sigcontext.h when signals are implemented */ +#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES)) + +/* + * Ensure that task->thread.za_state is allocated and sufficiently large. + * + * This function should be used only in preparation for replacing + * task->thread.za_state with new data. The memory is always zeroed + * here to prevent stale data from showing through: this is done in + * the interest of testability and predictability, the architecture + * guarantees that when ZA is enabled it will be zeroed. + */ +void sme_alloc(struct task_struct *task) +{ + if (task->thread.za_state) { + memset(task->thread.za_state, 0, za_state_size(task)); + return; + } + + /* This could potentially be up to 64K. */ + task->thread.za_state = + kzalloc(za_state_size(task), GFP_KERNEL); +} + +static void sme_free(struct task_struct *task) +{ + kfree(task->thread.za_state); + task->thread.za_state = NULL; +} + void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) { /* Set priority for all PEs to architecturally defined minimum */ @@ -1279,6 +1320,29 @@ void __init sme_setup(void) #endif /* CONFIG_ARM64_SME */ +static void sve_init_regs(void) +{ + /* + * Convert the FPSIMD state to SVE, zeroing all the state that + * is not shared with FPSIMD. If (as is likely) the current + * state is live in the registers then do this there and + * update our metadata for the current task including + * disabling the trap, otherwise update our in-memory copy. + * We are guaranteed to not be in streaming mode, we can only + * take a SVE trap when not in streaming mode and we can't be + * in streaming mode when taking a SME trap. + */ + if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { + unsigned long vq_minus_one = + sve_vq_from_vl(task_get_sve_vl(current)) - 1; + sve_set_vq(vq_minus_one); + sve_flush_live(true, vq_minus_one); + fpsimd_bind_task_to_cpu(); + } else { + fpsimd_to_sve(current); + } +} + /* * Trapped SVE access * @@ -1310,22 +1374,77 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs) WARN_ON(1); /* SVE access shouldn't have trapped */ /* - * Convert the FPSIMD state to SVE, zeroing all the state that - * is not shared with FPSIMD. If (as is likely) the current - * state is live in the registers then do this there and - * update our metadata for the current task including - * disabling the trap, otherwise update our in-memory copy. 
+ * Even if the task can have used streaming mode we can only + * generate SVE access traps in normal SVE mode and + * transitioning out of streaming mode may discard any + * streaming mode state. Always clear the high bits to avoid + * any potential errors tracking what is properly initialised. */ + sve_init_regs(); + + put_cpu_fpsimd_context(); +} + +/* + * Trapped SME access + * + * Storage is allocated for the full SVE and SME state, the current + * FPSIMD register contents are migrated to SVE if SVE is not already + * active, and the access trap is disabled. + * + * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state() + * would have disabled the SME access trap for userspace during + * ret_to_user, making an SVE access trap impossible in that case. + */ +void do_sme_acc(unsigned int esr, struct pt_regs *regs) +{ + /* Even if we chose not to use SME, the hardware could still trap: */ + if (unlikely(!system_supports_sme()) || WARN_ON(is_compat_task())) { + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); + return; + } + + /* + * If this not a trap due to SME being disabled then something + * is being used in the wrong mode, report as SIGILL. + */ + if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) { + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); + return; + } + + sve_alloc(current); + sme_alloc(current); + if (!current->thread.sve_state || !current->thread.za_state) { + force_sig(SIGKILL); + return; + } + + get_cpu_fpsimd_context(); + + /* With TIF_SME userspace shouldn't generate any traps */ + if (test_and_set_thread_flag(TIF_SME)) + WARN_ON(1); + if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { unsigned long vq_minus_one = - sve_vq_from_vl(task_get_sve_vl(current)) - 1; - sve_set_vq(vq_minus_one); - sve_flush_live(true, vq_minus_one); + sve_vq_from_vl(task_get_sme_vl(current)) - 1; + sme_set_vq(vq_minus_one); + fpsimd_bind_task_to_cpu(); - } else { - fpsimd_to_sve(current); } + /* + * If SVE was not already active initialise the SVE registers, + * any non-shared state between the streaming and regular SVE + * registers is architecturally guaranteed to be zeroed when + * we enter streaming mode. We do not need to initialize ZA + * since ZA must be disabled at this point and enabling ZA is + * architecturally defined to zero ZA. + */ + if (system_supports_sve() && !test_thread_flag(TIF_SVE)) + sve_init_regs(); + put_cpu_fpsimd_context(); } @@ -1442,8 +1561,12 @@ void fpsimd_flush_thread(void) fpsimd_flush_thread_vl(ARM64_VEC_SVE); } - if (system_supports_sme()) + if (system_supports_sme()) { + clear_thread_flag(TIF_SME); + sme_free(current); fpsimd_flush_thread_vl(ARM64_VEC_SME); + current->thread.svcr = 0; + } put_cpu_fpsimd_context(); } @@ -1493,14 +1616,22 @@ static void fpsimd_bind_task_to_cpu(void) last->svcr = ¤t->thread.svcr; current->thread.fpsimd_cpu = smp_processor_id(); + /* + * Toggle SVE and SME trapping for userspace if needed, these + * are serialsied by ret_to_user(). 
+ */ + if (system_supports_sme()) { + if (test_thread_flag(TIF_SME)) + sme_user_enable(); + else + sme_user_disable(); + } + if (system_supports_sve()) { - /* Toggle SVE trapping for userspace if needed */ if (test_thread_flag(TIF_SVE)) sve_user_enable(); else sve_user_disable(); - - /* Serialised by exception return to user */ } } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 07f235b46cf5..99c293513817 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -300,17 +300,41 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) /* * Detach src's sve_state (if any) from dst so that it does not - * get erroneously used or freed prematurely. dst's sve_state + * get erroneously used or freed prematurely. dst's copies * will be allocated on demand later on if dst uses SVE. * For consistency, also clear TIF_SVE here: this could be done * later in copy_process(), but to avoid tripping up future - * maintainers it is best not to leave TIF_SVE and sve_state in + * maintainers it is best not to leave TIF flags and buffers in * an inconsistent state, even temporarily. */ dst->thread.sve_state = NULL; clear_tsk_thread_flag(dst, TIF_SVE); - dst->thread.svcr = 0; + /* + * In the unlikely event that we create a new thread with ZA + * enabled we should retain the ZA state so duplicate it here. + * This may be shortly freed if we exec() or if CLONE_SETTLS + * but it's simpler to do it here. To avoid confusing the rest + * of the code ensure that we have a sve_state allocated + * whenever za_state is allocated. + */ + if (thread_za_enabled(&src->thread)) { + dst->thread.sve_state = kzalloc(sve_state_size(src), + GFP_KERNEL); + if (!dst->thread.za_state) + return -ENOMEM; + dst->thread.za_state = kmemdup(src->thread.za_state, + za_state_size(src), + GFP_KERNEL); + if (!dst->thread.za_state) { + kfree(dst->thread.sve_state); + dst->thread.sve_state = NULL; + return -ENOMEM; + } + } else { + dst->thread.za_state = NULL; + clear_tsk_thread_flag(dst, TIF_SME); + } /* clear any pending asynchronous tag fault raised by the parent */ clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT); diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index c938603b3ba0..92c69e5ac269 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -158,11 +158,36 @@ trace_exit: syscall_trace_exit(regs); } -static inline void sve_user_discard(void) +/* + * As per the ABI exit SME streaming mode and clear the SVE state not + * shared with FPSIMD on syscall entry. + */ +static inline void fp_user_discard(void) { + /* + * If SME is active then exit streaming mode. If ZA is active + * then flush the SVE registers but leave userspace access to + * both SVE and SME enabled, otherwise disable SME for the + * task and fall through to disabling SVE too. This means + * that after a syscall we never have any streaming mode + * register state to track, if this changes the KVM code will + * need updating. + */ + if (system_supports_sme() && test_thread_flag(TIF_SME)) { + u64 svcr = read_sysreg_s(SYS_SVCR_EL0); + + if (svcr & SYS_SVCR_EL0_SM_MASK) + sme_smstop_sm(); + } + if (!system_supports_sve()) return; + /* + * If SME is not active then disable SVE, the registers will + * be cleared when userspace next attempts to access them and + * we do not need to track the SVE register state until then. 
+ */ clear_thread_flag(TIF_SVE); /* @@ -177,7 +202,7 @@ static inline void sve_user_discard(void) void do_el0_svc(struct pt_regs *regs) { - sve_user_discard(); + fp_user_discard(); el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table); } From 40a8e87bb32855b39839d35b5b5b125494b3a604 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:25 +0100 Subject: [PATCH 021/145] arm64/sme: Disable ZA and streaming mode when handling signals The ABI requires that streaming mode and ZA are disabled when invoking signal handlers, do this in setup_return() when we prepare the task state for the signal handler. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-18-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 4a4122ef6f39..42efa464e46e 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -759,6 +759,13 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, /* TCO (Tag Check Override) always cleared for signal handlers */ regs->pstate &= ~PSR_TCO_BIT; + /* Signal handlers are invoked with ZA and streaming mode disabled */ + if (system_supports_sme()) { + current->thread.svcr &= ~(SYS_SVCR_EL0_ZA_MASK | + SYS_SVCR_EL0_SM_MASK); + sme_smstop(); + } + if (ka->sa.sa_flags & SA_RESTORER) sigtramp = ka->sa.sa_restorer; else From 85ed24dad2904f7c141911d91b7807ab02694b5e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:26 +0100 Subject: [PATCH 022/145] arm64/sme: Implement streaming SVE signal handling When in streaming mode we have the same set of SVE registers as we do in regular SVE mode with the exception of FFR and the use of the SME vector length. Provide signal handling for these registers by taking one of the reserved words in the SVE signal context as a flags field and defining a flag which is set for streaming mode. When the flag is set the vector length is set to the streaming mode vector length and we save and restore streaming mode data. We support entering or leaving streaming mode based on the value of the flag but do not support changing the vector length, this is not currently supported SVE signal handling. We could instead allocate a separate record in the signal frame for the streaming mode SVE context but this inflates the size of the maximal signal frame required and adds complication when validating signal frames from userspace, especially given the current structure of the code. Any implementation of support for streaming mode vectors in signals will have some potential for causing issues for applications that attempt to handle SVE vectors in signals, use streaming mode but do not understand streaming mode in their signal handling code, it is hard to identify a case that is clearly better than any other - they all have cases where they could cause unexpected register corruption or faults. 
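For example, a signal handler that wants to know whether the SVE record it was handed describes streaming-mode state can walk the context blocks and test the new flag. The sketch below is illustrative only: it assumes uapi headers from a kernel with this series applied (for SVE_SIG_FLAG_SM), it does not follow extra_context records for oversized frames, and printf() is used for brevity even though it is not async-signal-safe.

  #define _GNU_SOURCE
  #include <signal.h>
  #include <stdio.h>
  #include <string.h>
  #include <ucontext.h>
  #include <asm/sigcontext.h>   /* _aarch64_ctx, sve_context, SVE_SIG_FLAG_SM */

  static void handler(int sig, siginfo_t *info, void *ucv)
  {
      ucontext_t *uc = ucv;
      struct _aarch64_ctx *head =
          (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

      /* Walk the records in the frame until the zero terminator. */
      for (; head->magic; head = (void *)((char *)head + head->size)) {
          if (head->magic != SVE_MAGIC)
              continue;

          struct sve_context *sve = (struct sve_context *)head;

          printf("SVE record: vl=%u%s\n", sve->vl,
                 (sve->flags & SVE_SIG_FLAG_SM) ? " (streaming mode)" : "");
          return;
      }
      printf("no SVE record in this signal frame\n");
  }

  int main(void)
  {
      struct sigaction sa;

      memset(&sa, 0, sizeof(sa));
      sa.sa_sigaction = handler;
      sa.sa_flags = SA_SIGINFO;
      sigaction(SIGUSR1, &sa, NULL);
      raise(SIGUSR1);
      return 0;
  }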
Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-19-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/processor.h | 8 +++++ arch/arm64/include/uapi/asm/sigcontext.h | 16 +++++++-- arch/arm64/kernel/signal.c | 42 ++++++++++++++++++------ 3 files changed, 53 insertions(+), 13 deletions(-) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 6a3a6c3dec90..1d2ca4870b84 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -190,6 +190,14 @@ static inline unsigned int thread_get_sme_vl(struct thread_struct *thread) return thread_get_vl(thread, ARM64_VEC_SME); } +static inline unsigned int thread_get_cur_vl(struct thread_struct *thread) +{ + if (system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_SM_MASK)) + return thread_get_sme_vl(thread); + else + return thread_get_sve_vl(thread); +} + unsigned int task_get_vl(const struct task_struct *task, enum vec_type type); void task_set_vl(struct task_struct *task, enum vec_type type, unsigned long vl); diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index 0c796c795dbe..57e9f8c3ee9e 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -134,9 +134,12 @@ struct extra_context { struct sve_context { struct _aarch64_ctx head; __u16 vl; - __u16 __reserved[3]; + __u16 flags; + __u16 __reserved[2]; }; +#define SVE_SIG_FLAG_SM 0x1 /* Context describes streaming mode */ + #endif /* !__ASSEMBLY__ */ #include @@ -186,9 +189,16 @@ struct sve_context { * sve_context.vl must equal the thread's current vector length when * doing a sigreturn. * + * On systems with support for SME the SVE register state may reflect either + * streaming or non-streaming mode. In streaming mode the streaming mode + * vector length will be used and the flag SVE_SIG_FLAG_SM will be set in + * the flags field. It is permitted to enter or leave streaming mode in + * a signal return, applications should take care to ensure that any difference + * in vector length between the two modes is handled, including any resizing + * and movement of context blocks. * - * Note: for all these macros, the "vq" argument denotes the SVE - * vector length in quadwords (i.e., units of 128 bits). + * Note: for all these macros, the "vq" argument denotes the vector length + * in quadwords (i.e., units of 128 bits). * * The correct way to obtain vq is to use sve_vq_from_vl(vl). The * result is valid if and only if sve_vl_valid(vl) is true. 
This is diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 42efa464e46e..0ddce6afd2a3 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -226,11 +226,17 @@ static int preserve_sve_context(struct sve_context __user *ctx) { int err = 0; u16 reserved[ARRAY_SIZE(ctx->__reserved)]; + u16 flags = 0; unsigned int vl = task_get_sve_vl(current); unsigned int vq = 0; - if (test_thread_flag(TIF_SVE)) + if (thread_sm_enabled(¤t->thread)) { + vl = task_get_sme_vl(current); vq = sve_vq_from_vl(vl); + flags |= SVE_SIG_FLAG_SM; + } else if (test_thread_flag(TIF_SVE)) { + vq = sve_vq_from_vl(vl); + } memset(reserved, 0, sizeof(reserved)); @@ -238,6 +244,7 @@ static int preserve_sve_context(struct sve_context __user *ctx) __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16), &ctx->head.size, err); __put_user_error(vl, &ctx->vl, err); + __put_user_error(flags, &ctx->flags, err); BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); @@ -258,18 +265,28 @@ static int preserve_sve_context(struct sve_context __user *ctx) static int restore_sve_fpsimd_context(struct user_ctxs *user) { int err; - unsigned int vq; + unsigned int vl, vq; struct user_fpsimd_state fpsimd; struct sve_context sve; if (__copy_from_user(&sve, user->sve, sizeof(sve))) return -EFAULT; - if (sve.vl != task_get_sve_vl(current)) + if (sve.flags & SVE_SIG_FLAG_SM) { + if (!system_supports_sme()) + return -EINVAL; + + vl = task_get_sme_vl(current); + } else { + vl = task_get_sve_vl(current); + } + + if (sve.vl != vl) return -EINVAL; if (sve.head.size <= sizeof(*user->sve)) { clear_thread_flag(TIF_SVE); + current->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK; goto fpsimd_only; } @@ -301,7 +318,10 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) if (err) return -EFAULT; - set_thread_flag(TIF_SVE); + if (sve.flags & SVE_SIG_FLAG_SM) + current->thread.svcr |= SYS_SVCR_EL0_SM_MASK; + else + set_thread_flag(TIF_SVE); fpsimd_only: /* copy the FP and status/control registers */ @@ -393,7 +413,7 @@ static int parse_user_sigframe(struct user_ctxs *user, break; case SVE_MAGIC: - if (!system_supports_sve()) + if (!system_supports_sve() && !system_supports_sme()) goto invalid; if (user->sve) @@ -594,11 +614,12 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, if (system_supports_sve()) { unsigned int vq = 0; - if (add_all || test_thread_flag(TIF_SVE)) { - int vl = sve_max_vl(); + if (add_all || test_thread_flag(TIF_SVE) || + thread_sm_enabled(¤t->thread)) { + int vl = max(sve_max_vl(), sme_max_vl()); if (!add_all) - vl = task_get_sve_vl(current); + vl = thread_get_cur_vl(¤t->thread); vq = sve_vq_from_vl(vl); } @@ -649,8 +670,9 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, __put_user_error(current->thread.fault_code, &esr_ctx->esr, err); } - /* Scalable Vector Extension state, if present */ - if (system_supports_sve() && err == 0 && user->sve_offset) { + /* Scalable Vector Extension state (including streaming), if present */ + if ((system_supports_sve() || system_supports_sme()) && + err == 0 && user->sve_offset) { struct sve_context __user *sve_ctx = apply_user_offset(user, user->sve_offset); err |= preserve_sve_context(sve_ctx); From 39782210eb7e87634d96cacb6ece370bc59d74ba Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:27 +0100 Subject: [PATCH 023/145] arm64/sme: Implement ZA signal handling Implement support for ZA in signal handling in a very similar way 
to how we implement support for SVE registers, using a signal context structure with optional register state after it. Where present this register state stores the ZA matrix as a series of horizontal vectors numbered from 0 to VL/8 in the endinanness independent format used for vectors. As with SVE we do not allow changes in the vector length during signal return but we do allow ZA to be enabled or disabled. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-20-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/uapi/asm/sigcontext.h | 41 +++++++ arch/arm64/kernel/fpsimd.c | 3 - arch/arm64/kernel/signal.c | 139 +++++++++++++++++++++++ 3 files changed, 180 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index 57e9f8c3ee9e..4aaf31e3bf16 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -140,6 +140,14 @@ struct sve_context { #define SVE_SIG_FLAG_SM 0x1 /* Context describes streaming mode */ +#define ZA_MAGIC 0x54366345 + +struct za_context { + struct _aarch64_ctx head; + __u16 vl; + __u16 __reserved[3]; +}; + #endif /* !__ASSEMBLY__ */ #include @@ -259,4 +267,37 @@ struct sve_context { #define SVE_SIG_CONTEXT_SIZE(vq) \ (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq)) +/* + * If the ZA register is enabled for the thread at signal delivery then, + * za_context.head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl)) + * and the register data may be accessed using the ZA_SIG_*() macros. + * + * If za_context.head.size < ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl)) + * then ZA was not enabled and no register data was included in which case + * ZA register was not enabled for the thread and no register data + * the ZA_SIG_*() macros should not be used except for this check. + * + * The same convention applies when returning from a signal: a caller + * will need to remove or resize the za_context block if it wants to + * enable the ZA register when it was previously non-live or vice-versa. + * This may require the caller to allocate fresh memory and/or move other + * context blocks in the signal frame. + * + * Changing the vector length during signal return is not permitted: + * za_context.vl must equal the thread's current SME vector length when + * doing a sigreturn. + */ + +#define ZA_SIG_REGS_OFFSET \ + ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \ + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) + +#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES)) + +#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \ + (SVE_SIG_ZREG_SIZE(vq) * n)) + +#define ZA_SIG_CONTEXT_SIZE(vq) \ + (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq)) + #endif /* _UAPI__ASM_SIGCONTEXT_H */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 00a0cbd01ce5..80f7ca12f855 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1181,9 +1181,6 @@ void fpsimd_release_task(struct task_struct *dead_task) #ifdef CONFIG_ARM64_SME -/* This will move to uapi/asm/sigcontext.h when signals are implemented */ -#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES)) - /* * Ensure that task->thread.za_state is allocated and sufficiently large. 
* diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0ddce6afd2a3..2295948d97fd 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -56,6 +56,7 @@ struct rt_sigframe_user_layout { unsigned long fpsimd_offset; unsigned long esr_offset; unsigned long sve_offset; + unsigned long za_offset; unsigned long extra_offset; unsigned long end_offset; }; @@ -218,6 +219,7 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx) struct user_ctxs { struct fpsimd_context __user *fpsimd; struct sve_context __user *sve; + struct za_context __user *za; }; #ifdef CONFIG_ARM64_SVE @@ -346,6 +348,101 @@ extern int restore_sve_fpsimd_context(struct user_ctxs *user); #endif /* ! CONFIG_ARM64_SVE */ +#ifdef CONFIG_ARM64_SME + +static int preserve_za_context(struct za_context __user *ctx) +{ + int err = 0; + u16 reserved[ARRAY_SIZE(ctx->__reserved)]; + unsigned int vl = task_get_sme_vl(current); + unsigned int vq; + + if (thread_za_enabled(¤t->thread)) + vq = sve_vq_from_vl(vl); + else + vq = 0; + + memset(reserved, 0, sizeof(reserved)); + + __put_user_error(ZA_MAGIC, &ctx->head.magic, err); + __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16), + &ctx->head.size, err); + __put_user_error(vl, &ctx->vl, err); + BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); + err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); + + if (vq) { + /* + * This assumes that the ZA state has already been saved to + * the task struct by calling the function + * fpsimd_signal_preserve_current_state(). + */ + err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET, + current->thread.za_state, + ZA_SIG_REGS_SIZE(vq)); + } + + return err ? -EFAULT : 0; +} + +static int restore_za_context(struct user_ctxs __user *user) +{ + int err; + unsigned int vq; + struct za_context za; + + if (__copy_from_user(&za, user->za, sizeof(za))) + return -EFAULT; + + if (za.vl != task_get_sme_vl(current)) + return -EINVAL; + + if (za.head.size <= sizeof(*user->za)) { + current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK; + return 0; + } + + vq = sve_vq_from_vl(za.vl); + + if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq)) + return -EINVAL; + + /* + * Careful: we are about __copy_from_user() directly into + * thread.za_state with preemption enabled, so protection is + * needed to prevent a racing context switch from writing stale + * registers back over the new data. + */ + + fpsimd_flush_task_state(current); + /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ + + sme_alloc(current); + if (!current->thread.za_state) { + current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK; + clear_thread_flag(TIF_SME); + return -ENOMEM; + } + + err = __copy_from_user(current->thread.za_state, + (char __user const *)user->za + + ZA_SIG_REGS_OFFSET, + ZA_SIG_REGS_SIZE(vq)); + if (err) + return -EFAULT; + + set_thread_flag(TIF_SME); + current->thread.svcr |= SYS_SVCR_EL0_ZA_MASK; + + return 0; +} +#else /* ! CONFIG_ARM64_SME */ + +/* Turn any non-optimised out attempts to use these into a link error: */ +extern int preserve_za_context(void __user *ctx); +extern int restore_za_context(struct user_ctxs *user); + +#endif /* ! 
CONFIG_ARM64_SME */ static int parse_user_sigframe(struct user_ctxs *user, struct rt_sigframe __user *sf) @@ -360,6 +457,7 @@ static int parse_user_sigframe(struct user_ctxs *user, user->fpsimd = NULL; user->sve = NULL; + user->za = NULL; if (!IS_ALIGNED((unsigned long)base, 16)) goto invalid; @@ -425,6 +523,19 @@ static int parse_user_sigframe(struct user_ctxs *user, user->sve = (struct sve_context __user *)head; break; + case ZA_MAGIC: + if (!system_supports_sme()) + goto invalid; + + if (user->za) + goto invalid; + + if (size < sizeof(*user->za)) + goto invalid; + + user->za = (struct za_context __user *)head; + break; + case EXTRA_MAGIC: if (have_extra_context) goto invalid; @@ -548,6 +659,9 @@ static int restore_sigframe(struct pt_regs *regs, } } + if (err == 0 && system_supports_sme() && user.za) + err = restore_za_context(&user); + return err; } @@ -630,6 +744,24 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, return err; } + if (system_supports_sme()) { + unsigned int vl; + unsigned int vq = 0; + + if (add_all) + vl = sme_max_vl(); + else + vl = task_get_sme_vl(current); + + if (thread_za_enabled(¤t->thread)) + vq = sve_vq_from_vl(vl); + + err = sigframe_alloc(user, &user->za_offset, + ZA_SIG_CONTEXT_SIZE(vq)); + if (err) + return err; + } + return sigframe_alloc_end(user); } @@ -678,6 +810,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, err |= preserve_sve_context(sve_ctx); } + /* ZA state if present */ + if (system_supports_sme() && err == 0 && user->za_offset) { + struct za_context __user *za_ctx = + apply_user_offset(user, user->za_offset); + err |= preserve_za_context(za_ctx); + } + if (err == 0 && user->extra_offset) { char __user *sfp = (char __user *)user->sigframe; char __user *userp = From e12310a0d30f260b26297bc8d7c95769489af038 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:28 +0100 Subject: [PATCH 024/145] arm64/sme: Implement ptrace support for streaming mode SVE registers The streaming mode SVE registers are represented using the same data structures as for SVE but since the vector lengths supported and in use may not be the same as SVE we represent them with a new type NT_ARM_SSVE. Unfortunately we only have a single 16 bit reserved field available in the header so there is no space to fit the current and maximum vector length for both standard and streaming SVE mode without redefining the structure in a way the creates a complicatd and fragile ABI. Since FFR is not present in streaming mode it is read and written as zero. Setting NT_ARM_SSVE registers will put the task into streaming mode, similarly setting NT_ARM_SVE registers will exit it. Reads that do not correspond to the current mode of the task will return the header with no register data. For compatibility reasons on write setting no flag for the register type will be interpreted as setting SVE registers, though users can provide no register data as an alternative mechanism for doing so. 
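As an illustration of the tracer's view, the sketch below stops a child and reads just the NT_ARM_SSVE header to see whether streaming-mode register data is currently available. It is illustrative only and assumes uapi headers from a kernel with this series applied; the fallback value 0x40b is the one this patch adds to <linux/elf.h>.

  #include <signal.h>
  #include <stdio.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/uio.h>
  #include <sys/wait.h>
  #include <unistd.h>
  #include <asm/ptrace.h>   /* user_sve_header, SVE_PT_* */
  #include <linux/elf.h>

  #ifndef NT_ARM_SSVE
  #define NT_ARM_SSVE 0x40b /* added to <linux/elf.h> by this patch */
  #endif

  int main(void)
  {
      pid_t child = fork();

      if (child == 0) {
          ptrace(PTRACE_TRACEME, 0, NULL, NULL);
          raise(SIGSTOP);   /* stop so the parent can inspect us */
          _exit(0);
      }

      waitpid(child, NULL, 0);

      struct user_sve_header header;
      struct iovec iov = { .iov_base = &header, .iov_len = sizeof(header) };

      if (ptrace(PTRACE_GETREGSET, child, (void *)NT_ARM_SSVE, &iov) == 0) {
          if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE)
              printf("streaming SVE data available, vl=%u (max %u)\n",
                     header.vl, header.max_vl);
          else
              printf("no streaming-mode register data, vl=%u (max %u)\n",
                     header.vl, header.max_vl);
      } else {
          perror("PTRACE_GETREGSET(NT_ARM_SSVE)");
      }

      kill(child, SIGKILL);
      return 0;
  }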
Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-21-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 1 + arch/arm64/include/uapi/asm/ptrace.h | 13 +- arch/arm64/kernel/fpsimd.c | 31 +++- arch/arm64/kernel/ptrace.c | 214 +++++++++++++++++++++------ include/uapi/linux/elf.h | 1 + 5 files changed, 201 insertions(+), 59 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 6c33bc832ed4..5afcd0709aae 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -144,6 +144,7 @@ struct vl_info { extern void sve_alloc(struct task_struct *task); extern void fpsimd_release_task(struct task_struct *task); extern void fpsimd_sync_to_sve(struct task_struct *task); +extern void fpsimd_force_sync_to_sve(struct task_struct *task); extern void sve_sync_to_fpsimd(struct task_struct *task); extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task); diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 758ae984ff97..522b925a78c1 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -109,7 +109,7 @@ struct user_hwdebug_state { } dbg_regs[16]; }; -/* SVE/FP/SIMD state (NT_ARM_SVE) */ +/* SVE/FP/SIMD state (NT_ARM_SVE & NT_ARM_SSVE) */ struct user_sve_header { __u32 size; /* total meaningful regset content in bytes */ @@ -220,6 +220,7 @@ struct user_sve_header { (SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \ SVE_PT_SVE_PREGS_OFFSET(vq)) +/* For streaming mode SVE (SSVE) FFR must be read and written as zero */ #define SVE_PT_SVE_FFR_OFFSET(vq) \ (SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq)) @@ -240,10 +241,12 @@ struct user_sve_header { - SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1)) \ / __SVE_VQ_BYTES * __SVE_VQ_BYTES) -#define SVE_PT_SIZE(vq, flags) \ - (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \ - SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \ - : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags)) +#define SVE_PT_SIZE(vq, flags) \ + (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \ + SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \ + : ((((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD ? 
\ + SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags) \ + : SVE_PT_REGS_OFFSET))) /* pointer authentication masks (NT_ARM_PAC_MASK) */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 80f7ca12f855..94f06e9d37cf 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -643,7 +643,7 @@ static void fpsimd_to_sve(struct task_struct *task) if (!system_supports_sve()) return; - vq = sve_vq_from_vl(task_get_sve_vl(task)); + vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); __fpsimd_to_sve(sst, fst, vq); } @@ -660,7 +660,7 @@ static void fpsimd_to_sve(struct task_struct *task) */ static void sve_to_fpsimd(struct task_struct *task) { - unsigned int vq; + unsigned int vq, vl; void const *sst = task->thread.sve_state; struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; unsigned int i; @@ -669,7 +669,8 @@ static void sve_to_fpsimd(struct task_struct *task) if (!system_supports_sve()) return; - vq = sve_vq_from_vl(task_get_sve_vl(task)); + vl = thread_get_cur_vl(&task->thread); + vq = sve_vq_from_vl(vl); for (i = 0; i < SVE_NUM_ZREGS; ++i) { p = (__uint128_t const *)ZREG(sst, vq, i); fst->vregs[i] = arm64_le128_to_cpu(*p); @@ -717,6 +718,19 @@ void sve_alloc(struct task_struct *task) } +/* + * Force the FPSIMD state shared with SVE to be updated in the SVE state + * even if the SVE state is the current active state. + * + * This should only be called by ptrace. task must be non-runnable. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + */ +void fpsimd_force_sync_to_sve(struct task_struct *task) +{ + fpsimd_to_sve(task); +} + /* * Ensure that task->thread.sve_state is up to date with respect to * the user task, irrespective of when SVE is in use or not. @@ -727,7 +741,8 @@ void sve_alloc(struct task_struct *task) */ void fpsimd_sync_to_sve(struct task_struct *task) { - if (!test_tsk_thread_flag(task, TIF_SVE)) + if (!test_tsk_thread_flag(task, TIF_SVE) && + !thread_sm_enabled(&task->thread)) fpsimd_to_sve(task); } @@ -741,7 +756,8 @@ void fpsimd_sync_to_sve(struct task_struct *task) */ void sve_sync_to_fpsimd(struct task_struct *task) { - if (test_tsk_thread_flag(task, TIF_SVE)) + if (test_tsk_thread_flag(task, TIF_SVE) || + thread_sm_enabled(&task->thread)) sve_to_fpsimd(task); } @@ -766,7 +782,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task) if (!test_tsk_thread_flag(task, TIF_SVE)) return; - vq = sve_vq_from_vl(task_get_sve_vl(task)); + vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread)); memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); __fpsimd_to_sve(sst, fst, vq); @@ -810,8 +826,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type, /* * To ensure the FPSIMD bits of the SVE vector registers are preserved, * write any live register state back to task_struct, and convert to a - * regular FPSIMD thread. Since the vector length can only be changed - * with a syscall we can't be in streaming mode while reconfiguring. + * regular FPSIMD thread. 
*/ if (task == current) { get_cpu_fpsimd_context(); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 230a47b9189e..60185c27b394 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -713,21 +713,51 @@ static int system_call_set(struct task_struct *target, #ifdef CONFIG_ARM64_SVE static void sve_init_header_from_task(struct user_sve_header *header, - struct task_struct *target) + struct task_struct *target, + enum vec_type type) { unsigned int vq; + bool active; + bool fpsimd_only; + enum vec_type task_type; memset(header, 0, sizeof(*header)); - header->flags = test_tsk_thread_flag(target, TIF_SVE) ? - SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD; - if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) - header->flags |= SVE_PT_VL_INHERIT; + /* Check if the requested registers are active for the task */ + if (thread_sm_enabled(&target->thread)) + task_type = ARM64_VEC_SME; + else + task_type = ARM64_VEC_SVE; + active = (task_type == type); - header->vl = task_get_sve_vl(target); + switch (type) { + case ARM64_VEC_SVE: + if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) + header->flags |= SVE_PT_VL_INHERIT; + fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE); + break; + case ARM64_VEC_SME: + if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) + header->flags |= SVE_PT_VL_INHERIT; + fpsimd_only = false; + break; + default: + WARN_ON_ONCE(1); + return; + } + + if (active) { + if (fpsimd_only) { + header->flags |= SVE_PT_REGS_FPSIMD; + } else { + header->flags |= SVE_PT_REGS_SVE; + } + } + + header->vl = task_get_vl(target, type); vq = sve_vq_from_vl(header->vl); - header->max_vl = sve_max_vl(); + header->max_vl = vec_max_vl(type); header->size = SVE_PT_SIZE(vq, header->flags); header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), SVE_PT_REGS_SVE); @@ -738,19 +768,17 @@ static unsigned int sve_size_from_header(struct user_sve_header const *header) return ALIGN(header->size, SVE_VQ_BYTES); } -static int sve_get(struct task_struct *target, - const struct user_regset *regset, - struct membuf to) +static int sve_get_common(struct task_struct *target, + const struct user_regset *regset, + struct membuf to, + enum vec_type type) { struct user_sve_header header; unsigned int vq; unsigned long start, end; - if (!system_supports_sve()) - return -EINVAL; - /* Header */ - sve_init_header_from_task(&header, target); + sve_init_header_from_task(&header, target, type); vq = sve_vq_from_vl(header.vl); membuf_write(&to, &header, sizeof(header)); @@ -758,49 +786,61 @@ static int sve_get(struct task_struct *target, if (target == current) fpsimd_preserve_current_state(); - /* Registers: FPSIMD-only case */ - BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); - if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) + BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); + + switch ((header.flags & SVE_PT_REGS_MASK)) { + case SVE_PT_REGS_FPSIMD: return __fpr_get(target, regset, to); - /* Otherwise: full SVE case */ + case SVE_PT_REGS_SVE: + start = SVE_PT_SVE_OFFSET; + end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); + membuf_write(&to, target->thread.sve_state, end - start); - BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); - start = SVE_PT_SVE_OFFSET; - end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); - membuf_write(&to, target->thread.sve_state, end - start); + start = end; + end = SVE_PT_SVE_FPSR_OFFSET(vq); + membuf_zero(&to, end - start); - start = end; - end = SVE_PT_SVE_FPSR_OFFSET(vq); - membuf_zero(&to, end - start); + 
/* + * Copy fpsr, and fpcr which must follow contiguously in + * struct fpsimd_state: + */ + start = end; + end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; + membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, + end - start); - /* - * Copy fpsr, and fpcr which must follow contiguously in - * struct fpsimd_state: - */ - start = end; - end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; - membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start); + start = end; + end = sve_size_from_header(&header); + return membuf_zero(&to, end - start); - start = end; - end = sve_size_from_header(&header); - return membuf_zero(&to, end - start); + default: + return 0; + } } -static int sve_set(struct task_struct *target, +static int sve_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) + struct membuf to) +{ + if (!system_supports_sve()) + return -EINVAL; + + return sve_get_common(target, regset, to, ARM64_VEC_SVE); +} + +static int sve_set_common(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf, + enum vec_type type) { int ret; struct user_sve_header header; unsigned int vq; unsigned long start, end; - if (!system_supports_sve()) - return -EINVAL; - /* Header */ if (count < sizeof(header)) return -EINVAL; @@ -813,13 +853,37 @@ static int sve_set(struct task_struct *target, * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by * vec_set_vector_length(), which will also validate them for us: */ - ret = vec_set_vector_length(target, ARM64_VEC_SVE, header.vl, + ret = vec_set_vector_length(target, type, header.vl, ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); if (ret) goto out; /* Actual VL set may be less than the user asked for: */ - vq = sve_vq_from_vl(task_get_sve_vl(target)); + vq = sve_vq_from_vl(task_get_vl(target, type)); + + /* Enter/exit streaming mode */ + if (system_supports_sme()) { + u64 old_svcr = target->thread.svcr; + + switch (type) { + case ARM64_VEC_SVE: + target->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK; + break; + case ARM64_VEC_SME: + target->thread.svcr |= SYS_SVCR_EL0_SM_MASK; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + /* + * If we switched then invalidate any existing SVE + * state and ensure there's storage. + */ + if (target->thread.svcr != old_svcr) + sve_alloc(target); + } /* Registers: FPSIMD-only case */ @@ -828,10 +892,15 @@ static int sve_set(struct task_struct *target, ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, SVE_PT_FPSIMD_OFFSET); clear_tsk_thread_flag(target, TIF_SVE); + if (type == ARM64_VEC_SME) + fpsimd_force_sync_to_sve(target); goto out; } - /* Otherwise: full SVE case */ + /* + * Otherwise: no registers or full SVE case. For backwards + * compatibility reasons we treat empty flags as SVE registers. + */ /* * If setting a different VL from the requested VL and there is @@ -852,8 +921,9 @@ static int sve_set(struct task_struct *target, /* * Ensure target->thread.sve_state is up to date with target's - * FPSIMD regs, so that a short copyin leaves trailing registers - * unmodified. + * FPSIMD regs, so that a short copyin leaves trailing + * registers unmodified. Always enable SVE even if going into + * streaming mode. 
*/ fpsimd_sync_to_sve(target); set_tsk_thread_flag(target, TIF_SVE); @@ -889,8 +959,46 @@ out: return ret; } +static int sve_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + if (!system_supports_sve()) + return -EINVAL; + + return sve_set_common(target, regset, pos, count, kbuf, ubuf, + ARM64_VEC_SVE); +} + #endif /* CONFIG_ARM64_SVE */ +#ifdef CONFIG_ARM64_SME + +static int ssve_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_sme()) + return -EINVAL; + + return sve_get_common(target, regset, to, ARM64_VEC_SME); +} + +static int ssve_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + if (!system_supports_sme()) + return -EINVAL; + + return sve_set_common(target, regset, pos, count, kbuf, ubuf, + ARM64_VEC_SME); +} + +#endif /* CONFIG_ARM64_SME */ + #ifdef CONFIG_ARM64_PTR_AUTH static int pac_mask_get(struct task_struct *target, const struct user_regset *regset, @@ -1108,6 +1216,9 @@ enum aarch64_regset { #ifdef CONFIG_ARM64_SVE REGSET_SVE, #endif +#ifdef CONFIG_ARM64_SVE + REGSET_SSVE, +#endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, REGSET_PAC_ENABLED_KEYS, @@ -1188,6 +1299,17 @@ static const struct user_regset aarch64_regsets[] = { .set = sve_set, }, #endif +#ifdef CONFIG_ARM64_SME + [REGSET_SSVE] = { /* Streaming mode SVE */ + .core_note_type = NT_ARM_SSVE, + .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE), + SVE_VQ_BYTES), + .size = SVE_VQ_BYTES, + .align = SVE_VQ_BYTES, + .regset_get = ssve_get, + .set = ssve_set, + }, +#endif #ifdef CONFIG_ARM64_PTR_AUTH [REGSET_PAC_MASK] = { .core_note_type = NT_ARM_PAC_MASK, diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 787c657bfae8..a8dc688e1826 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -431,6 +431,7 @@ typedef struct elf64_shdr { #define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */ #define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */ #define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */ +#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */ #define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ #define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ From 776b4a1cf36411e96972455ca72906b722b80ea1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:29 +0100 Subject: [PATCH 025/145] arm64/sme: Add ptrace support for ZA The ZA array can be read and written with the NT_ARM_ZA. Similarly to our interface for the SVE vector registers the regset consists of a header with information on the current vector length followed by an optional register data payload, represented as for signals as a series of horizontal vectors from 0 to VL/8 in the endianness independent format used for vectors. On get if ZA is enabled then register data will be provided, otherwise it will be omitted. On set if register data is provided then ZA is enabled and initialized using the provided data, otherwise it is disabled. 
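A tracer can use the same pattern to decide whether ZA data follows the header by comparing user_za_header.size against ZA_PT_ZA_OFFSET, as filled in by za_get() below. The sketch is illustrative only: it assumes uapi headers from a kernel with this series applied, and the fallback NT_ARM_ZA value is an assumption that should be checked against your <linux/elf.h>.

  #include <signal.h>
  #include <stdio.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/uio.h>
  #include <sys/wait.h>
  #include <unistd.h>
  #include <asm/ptrace.h>   /* user_za_header, ZA_PT_* */
  #include <linux/elf.h>

  #ifndef NT_ARM_ZA
  #define NT_ARM_ZA 0x40c   /* assumed value; verify against your headers */
  #endif

  /* Report the ZA state of a tracee that is already ptrace-stopped. */
  static void report_za(pid_t child)
  {
      struct user_za_header header;
      struct iovec iov = { .iov_base = &header, .iov_len = sizeof(header) };

      if (ptrace(PTRACE_GETREGSET, child, (void *)NT_ARM_ZA, &iov) != 0) {
          perror("PTRACE_GETREGSET(NT_ARM_ZA)");
          return;
      }

      if (header.size > ZA_PT_ZA_OFFSET)
          printf("ZA enabled: vl=%u, %u bytes of ZA data follow the header\n",
                 header.vl, header.size - (unsigned int)ZA_PT_ZA_OFFSET);
      else
          printf("ZA disabled: vl=%u, header only\n", header.vl);
  }

  int main(void)
  {
      pid_t child = fork();

      if (child == 0) {     /* same tracee setup as the example above */
          ptrace(PTRACE_TRACEME, 0, NULL, NULL);
          raise(SIGSTOP);
          _exit(0);
      }

      waitpid(child, NULL, 0);
      report_za(child);
      kill(child, SIGKILL);
      return 0;
  }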
Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-22-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/uapi/asm/ptrace.h | 56 +++++++++++ arch/arm64/kernel/ptrace.c | 144 +++++++++++++++++++++++++++ include/uapi/linux/elf.h | 1 + 3 files changed, 201 insertions(+) diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 522b925a78c1..7fa2f7036aa7 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -268,6 +268,62 @@ struct user_pac_generic_keys { __uint128_t apgakey; }; +/* ZA state (NT_ARM_ZA) */ + +struct user_za_header { + __u32 size; /* total meaningful regset content in bytes */ + __u32 max_size; /* maxmium possible size for this thread */ + __u16 vl; /* current vector length */ + __u16 max_vl; /* maximum possible vector length */ + __u16 flags; + __u16 __reserved; +}; + +/* + * Common ZA_PT_* flags: + * These must be kept in sync with prctl interface in + */ +#define ZA_PT_VL_INHERIT ((1 << 17) /* PR_SME_VL_INHERIT */ >> 16) +#define ZA_PT_VL_ONEXEC ((1 << 18) /* PR_SME_SET_VL_ONEXEC */ >> 16) + + +/* + * The remainder of the ZA state follows struct user_za_header. The + * total size of the ZA state (including header) depends on the + * metadata in the header: ZA_PT_SIZE(vq, flags) gives the total size + * of the state in bytes, including the header. + * + * Refer to for details of how to pass the correct + * "vq" argument to these macros. + */ + +/* Offset from the start of struct user_za_header to the register data */ +#define ZA_PT_ZA_OFFSET \ + ((sizeof(struct user_za_header) + (__SVE_VQ_BYTES - 1)) \ + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) + +/* + * The payload starts at offset ZA_PT_ZA_OFFSET, and is of size + * ZA_PT_ZA_SIZE(vq, flags). + * + * The ZA array is stored as a sequence of horizontal vectors ZAV of SVL/8 + * bytes each, starting from vector 0. + * + * Additional data might be appended in the future. + * + * The ZA matrix is represented in memory in an endianness-invariant layout + * which differs from the layout used for the FPSIMD V-registers on big-endian + * systems: see sigcontext.h for more explanation. 
+ */ + +#define ZA_PT_ZAV_OFFSET(vq, n) \ + (ZA_PT_ZA_OFFSET + ((vq * __SVE_VQ_BYTES) * n)) + +#define ZA_PT_ZA_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES)) + +#define ZA_PT_SIZE(vq) \ + (ZA_PT_ZA_OFFSET + ZA_PT_ZA_SIZE(vq)) + #endif /* __ASSEMBLY__ */ #endif /* _UAPI__ASM_PTRACE_H */ diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 60185c27b394..47d8a7472171 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -997,6 +997,141 @@ static int ssve_set(struct task_struct *target, ARM64_VEC_SME); } +static int za_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct user_za_header header; + unsigned int vq; + unsigned long start, end; + + if (!system_supports_sme()) + return -EINVAL; + + /* Header */ + memset(&header, 0, sizeof(header)); + + if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) + header.flags |= ZA_PT_VL_INHERIT; + + header.vl = task_get_sme_vl(target); + vq = sve_vq_from_vl(header.vl); + header.max_vl = sme_max_vl(); + header.max_size = ZA_PT_SIZE(vq); + + /* If ZA is not active there is only the header */ + if (thread_za_enabled(&target->thread)) + header.size = ZA_PT_SIZE(vq); + else + header.size = ZA_PT_ZA_OFFSET; + + membuf_write(&to, &header, sizeof(header)); + + BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); + end = ZA_PT_ZA_OFFSET; + + if (target == current) + fpsimd_preserve_current_state(); + + /* Any register data to include? */ + if (thread_za_enabled(&target->thread)) { + start = end; + end = ZA_PT_SIZE(vq); + membuf_write(&to, target->thread.za_state, end - start); + } + + /* Zero any trailing padding */ + start = end; + end = ALIGN(header.size, SVE_VQ_BYTES); + return membuf_zero(&to, end - start); +} + +static int za_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_za_header header; + unsigned int vq; + unsigned long start, end; + + if (!system_supports_sme()) + return -EINVAL; + + /* Header */ + if (count < sizeof(header)) + return -EINVAL; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, + 0, sizeof(header)); + if (ret) + goto out; + + /* + * All current ZA_PT_* flags are consumed by + * vec_set_vector_length(), which will also validate them for + * us: + */ + ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl, + ((unsigned long)header.flags) << 16); + if (ret) + goto out; + + /* Actual VL set may be less than the user asked for: */ + vq = sve_vq_from_vl(task_get_sme_vl(target)); + + /* Ensure there is some SVE storage for streaming mode */ + if (!target->thread.sve_state) { + sve_alloc(target); + if (!target->thread.sve_state) { + clear_thread_flag(TIF_SME); + ret = -ENOMEM; + goto out; + } + } + + /* Allocate/reinit ZA storage */ + sme_alloc(target); + if (!target->thread.za_state) { + ret = -ENOMEM; + clear_tsk_thread_flag(target, TIF_SME); + goto out; + } + + /* If there is no data then disable ZA */ + if (!count) { + target->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK; + goto out; + } + + /* + * If setting a different VL from the requested VL and there is + * register data, the data layout will be wrong: don't even + * try to set the registers in this case. 
+ */ + if (vq != sve_vq_from_vl(header.vl)) { + ret = -EIO; + goto out; + } + + BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); + start = ZA_PT_ZA_OFFSET; + end = ZA_PT_SIZE(vq); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + target->thread.za_state, + start, end); + if (ret) + goto out; + + /* Mark ZA as active and let userspace use it */ + set_tsk_thread_flag(target, TIF_SME); + target->thread.svcr |= SYS_SVCR_EL0_ZA_MASK; + +out: + fpsimd_flush_task_state(target); + return ret; +} + #endif /* CONFIG_ARM64_SME */ #ifdef CONFIG_ARM64_PTR_AUTH @@ -1218,6 +1353,7 @@ enum aarch64_regset { #endif #ifdef CONFIG_ARM64_SVE REGSET_SSVE, + REGSET_ZA, #endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, @@ -1309,6 +1445,14 @@ static const struct user_regset aarch64_regsets[] = { .regset_get = ssve_get, .set = ssve_set, }, + [REGSET_ZA] = { /* SME ZA */ + .core_note_type = NT_ARM_ZA, + .n = DIV_ROUND_UP(ZA_PT_ZA_SIZE(SVE_VQ_MAX), SVE_VQ_BYTES), + .size = SVE_VQ_BYTES, + .align = SVE_VQ_BYTES, + .regset_get = za_get, + .set = za_set, + }, #endif #ifdef CONFIG_ARM64_PTR_AUTH [REGSET_PAC_MASK] = { diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index a8dc688e1826..97808f958903 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -432,6 +432,7 @@ typedef struct elf64_shdr { #define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */ #define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */ #define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */ +#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */ #define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ #define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ From d45d7ff7047f7f6c3221b0f028fade640812f931 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:30 +0100 Subject: [PATCH 026/145] arm64/sme: Disable streaming mode and ZA when flushing CPU state Both streaming mode and ZA may increase power consumption when they are enabled and streaming mode makes many FPSIMD and SVE instructions undefined which will cause problems for any kernel mode floating point so disable both when we flush the CPU state. This covers both kernel_neon_begin() and idle and after flushing the state a reload is always required anyway. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-23-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 94f06e9d37cf..9592cdd7d635 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1759,6 +1759,15 @@ static void fpsimd_flush_cpu_state(void) { WARN_ON(!system_supports_fpsimd()); __this_cpu_write(fpsimd_last_state.st, NULL); + + /* + * Leaving streaming mode enabled will cause issues for any kernel + * NEON and leaving streaming mode or ZA enabled may increase power + * consumption. 
+ */ + if (system_supports_sme()) + sme_smstop(); + set_thread_flag(TIF_FOREIGN_FPSTATE); } From e0838f6373e5cb72516fc4c26bba309097e2a80a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:31 +0100 Subject: [PATCH 027/145] arm64/sme: Save and restore streaming mode over EFI runtime calls When saving and restoring the floating point state over an EFI runtime call ensure that we handle streaming mode, only handling FFR if we are not in streaming mode and ensuring that we are in normal mode over the call into runtime services. We currently assume that ZA will not be modified by runtime services, the specification is not yet finalised so this may need updating if that changes. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-24-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 48 +++++++++++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 9592cdd7d635..64431bc62472 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1056,21 +1056,25 @@ int vec_verify_vq_map(enum vec_type type) static void __init sve_efi_setup(void) { - struct vl_info *info = &vl_info[ARM64_VEC_SVE]; + int max_vl = 0; + int i; if (!IS_ENABLED(CONFIG_EFI)) return; + for (i = 0; i < ARRAY_SIZE(vl_info); i++) + max_vl = max(vl_info[i].max_vl, max_vl); + /* * alloc_percpu() warns and prints a backtrace if this goes wrong. * This is evidence of a crippled system and we are returning void, * so no attempt is made to handle this situation here. */ - if (!sve_vl_valid(info->max_vl)) + if (!sve_vl_valid(max_vl)) goto fail; efi_sve_state = __alloc_percpu( - SVE_SIG_REGS_SIZE(sve_vq_from_vl(info->max_vl)), SVE_VQ_BYTES); + SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)), SVE_VQ_BYTES); if (!efi_sve_state) goto fail; @@ -1845,6 +1849,7 @@ EXPORT_SYMBOL(kernel_neon_end); static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state); static DEFINE_PER_CPU(bool, efi_fpsimd_state_used); static DEFINE_PER_CPU(bool, efi_sve_state_used); +static DEFINE_PER_CPU(bool, efi_sm_state); /* * EFI runtime services support functions @@ -1879,12 +1884,28 @@ void __efi_fpsimd_begin(void) */ if (system_supports_sve() && likely(efi_sve_state)) { char *sve_state = this_cpu_ptr(efi_sve_state); + bool ffr = true; + u64 svcr; __this_cpu_write(efi_sve_state_used, true); + if (system_supports_sme()) { + svcr = read_sysreg_s(SYS_SVCR_EL0); + + if (!system_supports_fa64()) + ffr = svcr & SYS_SVCR_EL0_SM_MASK; + + __this_cpu_write(efi_sm_state, ffr); + } + sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()), &this_cpu_ptr(&efi_fpsimd_state)->fpsr, - true); + ffr); + + if (system_supports_sme()) + sysreg_clear_set_s(SYS_SVCR_EL0, + SYS_SVCR_EL0_SM_MASK, 0); + } else { fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state)); } @@ -1907,11 +1928,26 @@ void __efi_fpsimd_end(void) if (system_supports_sve() && likely(__this_cpu_read(efi_sve_state_used))) { char const *sve_state = this_cpu_ptr(efi_sve_state); + bool ffr = true; + + /* + * Restore streaming mode; EFI calls are + * normal function calls so should not return in + * streaming mode. 
+ */ + if (system_supports_sme()) { + if (__this_cpu_read(efi_sm_state)) { + sysreg_clear_set_s(SYS_SVCR_EL0, + 0, + SYS_SVCR_EL0_SM_MASK); + if (!system_supports_fa64()) + ffr = efi_sm_state; + } + } - sve_set_vq(sve_vq_from_vl(sve_get_vl()) - 1); sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()), &this_cpu_ptr(&efi_fpsimd_state)->fpsr, - true); + ffr); __this_cpu_write(efi_sve_state_used, false); } else { From 90807748ca3ac4874853b2148928529bf1f13e5e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:32 +0100 Subject: [PATCH 028/145] KVM: arm64: Hide SME system registers from guests For the time being we do not support use of SME by KVM guests, support for this will be enabled in future. In order to prevent any side effects or side channels via the new system registers, including the EL0 read/write register TPIDR2, explicitly undefine all the system registers added by SME and mask out the SME bitfield in SYS_ID_AA64PFR1. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20220419112247.711548-25-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kvm/sys_regs.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 7b45c040cc27..689e53dd4cb1 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1132,6 +1132,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, case SYS_ID_AA64PFR1_EL1: if (!kvm_has_mte(vcpu->kvm)) val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); + + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME); break; case SYS_ID_AA64ISAR1_EL1: if (!vcpu_has_ptrauth(vcpu)) @@ -1553,7 +1555,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_UNALLOCATED(4,2), ID_UNALLOCATED(4,3), ID_SANITISED(ID_AA64ZFR0_EL1), - ID_UNALLOCATED(4,5), + ID_HIDDEN(ID_AA64SMFR0_EL1), ID_UNALLOCATED(4,6), ID_UNALLOCATED(4,7), @@ -1596,6 +1598,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, { SYS_DESC(SYS_TRFCR_EL1), undef_access }, + { SYS_DESC(SYS_SMPRI_EL1), undef_access }, + { SYS_DESC(SYS_SMCR_EL1), undef_access }, { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, @@ -1678,8 +1682,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr }, { SYS_DESC(SYS_CLIDR_EL1), access_clidr }, + { SYS_DESC(SYS_SMIDR_EL1), undef_access }, { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 }, { SYS_DESC(SYS_CTR_EL0), access_ctr }, + { SYS_DESC(SYS_SVCR_EL0), undef_access }, { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, .reg = PMCR_EL0 }, @@ -1719,6 +1725,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, + { SYS_DESC(SYS_TPIDR2_EL0), undef_access }, { SYS_DESC(SYS_SCXTNUM_EL0), undef_access }, From 51729fb1d0683df5e9e4d5dbe2ec46188f011da9 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:33 +0100 Subject: [PATCH 029/145] KVM: arm64: Trap SME usage in guest SME defines two new traps which need to be enabled for guests to ensure that they can't use SME, one for the main SME operations which mirrors the traps for SVE and another for 
access to TPIDR2 in SCTLR_EL2. For VHE manage SMEN along with ZEN in activate_traps() and the FP state management callbacks, along with SCTLR_EL2.EnTPIDR2. There is no existing dynamic management of SCTLR_EL2. For nVHE manage TSM in activate_traps() along with the fine grained traps for TPIDR2 and SMPRI. There is no existing dynamic management of fine grained traps. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20220419112247.711548-26-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kvm/hyp/nvhe/switch.c | 30 ++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/vhe/switch.c | 11 ++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 6410d21d8695..caace61ea459 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -47,10 +47,24 @@ static void __activate_traps(struct kvm_vcpu *vcpu) val |= CPTR_EL2_TFP | CPTR_EL2_TZ; __activate_traps_fpsimd32(vcpu); } + if (cpus_have_final_cap(ARM64_SME)) + val |= CPTR_EL2_TSM; write_sysreg(val, cptr_el2); write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); + if (cpus_have_final_cap(ARM64_SME)) { + val = read_sysreg_s(SYS_HFGRTR_EL2); + val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK | + HFGxTR_EL2_nSMPRI_EL1_MASK); + write_sysreg_s(val, SYS_HFGRTR_EL2); + + val = read_sysreg_s(SYS_HFGWTR_EL2); + val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK | + HFGxTR_EL2_nSMPRI_EL1_MASK); + write_sysreg_s(val, SYS_HFGWTR_EL2); + } + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; @@ -94,9 +108,25 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); + if (cpus_have_final_cap(ARM64_SME)) { + u64 val; + + val = read_sysreg_s(SYS_HFGRTR_EL2); + val |= HFGxTR_EL2_nTPIDR2_EL0_MASK | + HFGxTR_EL2_nSMPRI_EL1_MASK; + write_sysreg_s(val, SYS_HFGRTR_EL2); + + val = read_sysreg_s(SYS_HFGWTR_EL2); + val |= HFGxTR_EL2_nTPIDR2_EL0_MASK | + HFGxTR_EL2_nSMPRI_EL1_MASK; + write_sysreg_s(val, SYS_HFGWTR_EL2); + } + cptr = CPTR_EL2_DEFAULT; if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)) cptr |= CPTR_EL2_TZ; + if (cpus_have_final_cap(ARM64_SME)) + cptr &= ~CPTR_EL2_TSM; write_sysreg(cptr, cptr_el2); write_sysreg(__kvm_hyp_host_vector, vbar_el2); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 262dfe03134d..969f20daf97a 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -41,7 +41,8 @@ static void __activate_traps(struct kvm_vcpu *vcpu) val = read_sysreg(cpacr_el1); val |= CPACR_EL1_TTA; - val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN); + val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN | + CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN); /* * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to @@ -62,6 +63,10 @@ static void __activate_traps(struct kvm_vcpu *vcpu) __activate_traps_fpsimd32(vcpu); } + if (cpus_have_final_cap(ARM64_SME)) + write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2, + sctlr_el2); + write_sysreg(val, cpacr_el1); write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1); @@ -83,6 +88,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) */ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); + if (cpus_have_final_cap(ARM64_SME)) + write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2, + sctlr_el2); + write_sysreg(CPACR_EL1_DEFAULT, 
cpacr_el1); if (!arm64_kernel_unmapped_at_el0()) From 861262ab862702061ae3355b811a07b15d1b2fc0 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:34 +0100 Subject: [PATCH 030/145] KVM: arm64: Handle SME host state when running guests While we don't currently support SME in guests we do currently support it for the host system so we need to take care of SME's impact, including the floating point register state, when running guests. Simiarly to SVE we need to manage the traps in CPACR_RL1, what is new is the handling of streaming mode and ZA. Normally we defer any handling of the floating point register state until the guest first uses it however if the system is in streaming mode FPSIMD and SVE operations may generate SME traps which we would need to distinguish from actual attempts by the guest to use SME. Rather than do this for the time being if we are in streaming mode when entering the guest we force the floating point state to be saved immediately and exit streaming mode, meaning that the guest won't generate SME traps for supported operations. We could handle ZA in the access trap similarly to the FPSIMD/SVE state without the disruption caused by streaming mode but for simplicity handle it the same way as streaming mode for now. This will be revisited when we support SME for guests (hopefully before SME hardware becomes available), for now it will only incur additional cost on systems with SME and even there only if streaming mode or ZA are enabled. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20220419112247.711548-27-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/fpsimd.c | 36 +++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8a7c442d5b57..f8f0d30dd1a2 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -454,6 +454,7 @@ struct kvm_vcpu_arch { #define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ #define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14) #define KVM_ARM64_ON_UNSUPPORTED_CPU (1 << 15) /* Physical CPU not in supported_cpus */ +#define KVM_ARM64_HOST_SME_ENABLED (1 << 16) /* SME enabled for EL0 */ #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ KVM_GUESTDBG_USE_SW_BP | \ diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 57d7ac3cfa0c..441edb9c398c 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -82,6 +82,26 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; + + /* + * We don't currently support SME guests but if we leave + * things in streaming mode then when the guest starts running + * FPSIMD or SVE code it may generate SME traps so as a + * special case if we are in streaming mode we force the host + * state to be saved now and exit streaming mode so that we + * don't have to handle any SME traps for valid guest + * operations. Do this for ZA as well for now for simplicity. 
+ */ + if (system_supports_sme()) { + if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN) + vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED; + + if (read_sysreg_s(SYS_SVCR_EL0) & + (SYS_SVCR_EL0_SM_MASK | SYS_SVCR_EL0_ZA_MASK)) { + vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; + fpsimd_save_and_flush_cpu_state(); + } + } } /* @@ -135,6 +155,22 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) local_irq_save(flags); + /* + * If we have VHE then the Hyp code will reset CPACR_EL1 to + * CPACR_EL1_DEFAULT and we need to reenable SME. + */ + if (has_vhe() && system_supports_sme()) { + /* Also restore EL0 state seen on entry */ + if (vcpu->arch.flags & KVM_ARM64_HOST_SME_ENABLED) + sysreg_clear_set(CPACR_EL1, 0, + CPACR_EL1_SMEN_EL0EN | + CPACR_EL1_SMEN_EL1EN); + else + sysreg_clear_set(CPACR_EL1, + CPACR_EL1_SMEN_EL0EN, + CPACR_EL1_SMEN_EL1EN); + } + if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { if (vcpu_has_sve(vcpu)) { __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR); From a1f4ccd25cc256255813f584f10e5527369d4a02 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:35 +0100 Subject: [PATCH 031/145] arm64/sme: Provide Kconfig for SME Now that basline support for the Scalable Matrix Extension (SME) is present introduce the Kconfig option allowing it to be built. While the feature registers don't impose a strong requirement for a system with SME to support SVE at runtime the support for streaming mode SVE is mostly shared with normal SVE so depend on SVE. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-28-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 57c4c995965f..0897984918e8 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1939,6 +1939,17 @@ config ARM64_SVE booting the kernel. If unsure and you are not observing these symptoms, you should assume that it is safe to say Y. +config ARM64_SME + bool "ARM Scalable Matrix Extension support" + default y + depends on ARM64_SVE + help + The Scalable Matrix Extension (SME) is an extension to the AArch64 + execution state which utilises a substantial subset of the SVE + instruction set, together with the addition of new architectural + register state capable of holding two dimensional matrix tiles to + enable various matrix operations. + config ARM64_MODULE_PLTS bool "Use PLTs to allow module memory to spill over into vmalloc area" depends on MODULES From da32b5817253697671af961715517bfbb308a592 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Sat, 23 Apr 2022 11:07:49 +0100 Subject: [PATCH 032/145] mm: Add fault_in_subpage_writeable() to probe at sub-page granularity On hardware with features like arm64 MTE or SPARC ADI, an access fault can be triggered at sub-page granularity. Depending on how the fault_in_writeable() function is used, the caller can get into a live-lock by continuously retrying the fault-in on an address different from the one where the uaccess failed. In the majority of cases progress is ensured by the following conditions: 1. copy_to_user_nofault() guarantees at least one byte access if the user address is not faulting. 2. The fault_in_writeable() loop is resumed from the first address that could not be accessed by copy_to_user_nofault(). If the loop iteration is restarted from an earlier (initial) point, the loop is repeated with the same conditions and it would live-lock. 
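As a sketch of the resulting calling pattern (kernel-side pseudocode, not
code from this patch: emit_records() stands in for whatever copies data out
with copy_to_user_nofault() under a lock that cannot fault, advancing the
offset only past what was fully copied), a copy-out loop stays live-lock
free on such hardware by probing with the fault_in_subpage_writeable()
helper introduced below:

static int copy_records(char __user *ubuf, size_t len)
{
	size_t offset = 0;

	while (offset < len) {
		/*
		 * Probe at sub-page granularity so a tag fault inside an
		 * already faulted-in page cannot make this loop spin.
		 */
		if (fault_in_subpage_writeable(ubuf + offset, len - offset))
			return -EFAULT;

		/*
		 * Resumes from the first address that failed to copy
		 * (condition 2 above), so each retry makes progress.
		 */
		emit_records(ubuf, len, &offset);
	}

	return 0;
}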
Introduce an arch-specific probe_subpage_writeable() and call it from the newly added fault_in_subpage_writeable() function. The arch code with sub-page faults will have to implement the specific probing functionality. Note that no other fault_in_subpage_*() functions are added since they have no callers currently susceptible to a live-lock. Signed-off-by: Catalin Marinas Cc: Andrew Morton Link: https://lore.kernel.org/r/20220423100751.1870771-2-catalin.marinas@arm.com Signed-off-by: Catalin Marinas --- arch/Kconfig | 7 +++++++ include/linux/pagemap.h | 1 + include/linux/uaccess.h | 22 ++++++++++++++++++++++ mm/gup.c | 29 +++++++++++++++++++++++++++++ 4 files changed, 59 insertions(+) diff --git a/arch/Kconfig b/arch/Kconfig index 29b0167c088b..b34032279926 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -24,6 +24,13 @@ config KEXEC_ELF config HAVE_IMA_KEXEC bool +config ARCH_HAS_SUBPAGE_FAULTS + bool + help + Select if the architecture can check permissions at sub-page + granularity (e.g. arm64 MTE). The probe_user_*() functions + must be implemented. + config HOTPLUG_SMT bool diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 993994cd943a..6165283bdb6f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1046,6 +1046,7 @@ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter); * Fault in userspace address range. */ size_t fault_in_writeable(char __user *uaddr, size_t size); +size_t fault_in_subpage_writeable(char __user *uaddr, size_t size); size_t fault_in_safe_writeable(const char __user *uaddr, size_t size); size_t fault_in_readable(const char __user *uaddr, size_t size); diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 546179418ffa..5a328cf02b75 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -231,6 +231,28 @@ static inline bool pagefault_disabled(void) */ #define faulthandler_disabled() (pagefault_disabled() || in_atomic()) +#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS + +/** + * probe_subpage_writeable: probe the user range for write faults at sub-page + * granularity (e.g. arm64 MTE) + * @uaddr: start of address range + * @size: size of address range + * + * Returns 0 on success, the number of bytes not probed on fault. + * + * It is expected that the caller checked for the write permission of each + * page in the range either by put_user() or GUP. The architecture port can + * implement a more efficient get_user() probing if the same sub-page faults + * are triggered by either a read or a write. + */ +static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size) +{ + return 0; +} + +#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */ + #ifndef ARCH_HAS_NOCACHE_UACCESS static inline __must_check unsigned long diff --git a/mm/gup.c b/mm/gup.c index f598a037eb04..501bc150792c 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1648,6 +1648,35 @@ out: } EXPORT_SYMBOL(fault_in_writeable); +/** + * fault_in_subpage_writeable - fault in an address range for writing + * @uaddr: start of address range + * @size: size of address range + * + * Fault in a user address range for writing while checking for permissions at + * sub-page granularity (e.g. arm64 MTE). This function should be used when + * the caller cannot guarantee forward progress of a copy_to_user() loop. + * + * Returns the number of bytes not faulted in (like copy_to_user() and + * copy_from_user()). 
+ */ +size_t fault_in_subpage_writeable(char __user *uaddr, size_t size) +{ + size_t faulted_in; + + /* + * Attempt faulting in at page granularity first for page table + * permission checking. The arch-specific probe_subpage_writeable() + * functions may not check for this. + */ + faulted_in = size - fault_in_writeable(uaddr, size); + if (faulted_in) + faulted_in -= probe_subpage_writeable(uaddr, faulted_in); + + return size - faulted_in; +} +EXPORT_SYMBOL(fault_in_subpage_writeable); + /* * fault_in_safe_writeable - fault in an address range for writing * @uaddr: start of address range From f3ba50a7a100e91b0b13ca43190a66c1bfdb9993 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Sat, 23 Apr 2022 11:07:50 +0100 Subject: [PATCH 033/145] arm64: Add support for user sub-page fault probing With MTE, even if the pte allows an access, a mismatched tag somewhere within a page can still cause a fault. Select ARCH_HAS_SUBPAGE_FAULTS if MTE is enabled and implement the probe_subpage_writeable() function. Note that get_user() is sufficient for the writeable MTE check since the same tag mismatch fault would be triggered by a read. The caller of probe_subpage_writeable() will need to check the pte permissions (put_user, GUP). Signed-off-by: Catalin Marinas Cc: Will Deacon Link: https://lore.kernel.org/r/20220423100751.1870771-3-catalin.marinas@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/mte.h | 1 + arch/arm64/include/asm/uaccess.h | 15 +++++++++++++++ arch/arm64/kernel/mte.c | 30 ++++++++++++++++++++++++++++++ 4 files changed, 47 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 57c4c995965f..290b88238103 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1871,6 +1871,7 @@ config ARM64_MTE depends on AS_HAS_LSE_ATOMICS # Required for tag checking in the uaccess routines depends on ARM64_PAN + select ARCH_HAS_SUBPAGE_FAULTS select ARCH_USES_HIGH_VMA_FLAGS help Memory Tagging (part of the ARMv8.5 Extensions) provides diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index adcb937342f1..aa523591a44e 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -47,6 +47,7 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg); long get_mte_ctrl(struct task_struct *task); int mte_ptrace_copy_tags(struct task_struct *child, long request, unsigned long addr, unsigned long data); +size_t mte_probe_user_range(const char __user *uaddr, size_t size); #else /* CONFIG_ARM64_MTE */ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index e8dce0cc5eaa..63f9c828f1a7 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -460,4 +460,19 @@ static inline int __copy_from_user_flushcache(void *dst, const void __user *src, } #endif +#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS + +/* + * Return 0 on success, the number of bytes not probed otherwise. 
+ */ +static inline size_t probe_subpage_writeable(const char __user *uaddr, + size_t size) +{ + if (!system_supports_mte()) + return 0; + return mte_probe_user_range(uaddr, size); +} + +#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */ + #endif /* __ASM_UACCESS_H */ diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 78b3e0f8e997..35697a09926f 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -543,3 +544,32 @@ static int register_mte_tcf_preferred_sysctl(void) return 0; } subsys_initcall(register_mte_tcf_preferred_sysctl); + +/* + * Return 0 on success, the number of bytes not probed otherwise. + */ +size_t mte_probe_user_range(const char __user *uaddr, size_t size) +{ + const char __user *end = uaddr + size; + int err = 0; + char val; + + __raw_get_user(val, uaddr, err); + if (err) + return size; + + uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE); + while (uaddr < end) { + /* + * A read is sufficient for mte, the caller should have probed + * for the pte write permission if required. + */ + __raw_get_user(val, uaddr, err); + if (err) + return end - uaddr; + uaddr += MTE_GRANULE_SIZE; + } + (void)val; + + return 0; +} From 18788e34642e2e3eae785b0966769d03e7fbe9d2 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Sat, 23 Apr 2022 11:07:51 +0100 Subject: [PATCH 034/145] btrfs: Avoid live-lock in search_ioctl() on hardware with sub-page faults Commit a48b73eca4ce ("btrfs: fix potential deadlock in the search ioctl") addressed a lockdep warning by pre-faulting the user pages and attempting the copy_to_user_nofault() in an infinite loop. On architectures like arm64 with MTE, an access may fault within a page at a location different from what fault_in_writeable() probed. Since the sk_offset is rewound to the previous struct btrfs_ioctl_search_header boundary, there is no guaranteed forward progress and search_ioctl() may live-lock. Use fault_in_subpage_writeable() instead of fault_in_writeable() to ensure the permission is checked at the right granularity (smaller than PAGE_SIZE). Signed-off-by: Catalin Marinas Fixes: a48b73eca4ce ("btrfs: fix potential deadlock in the search ioctl") Reported-by: Al Viro Acked-by: David Sterba Cc: Chris Mason Cc: Josef Bacik Link: https://lore.kernel.org/r/20220423100751.1870771-4-catalin.marinas@arm.com Signed-off-by: Catalin Marinas --- fs/btrfs/ioctl.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index be6c24577dbe..9bf0616a3069 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2565,7 +2565,12 @@ static noinline int search_ioctl(struct inode *inode, while (1) { ret = -EFAULT; - if (fault_in_writeable(ubuf + sk_offset, *buf_size - sk_offset)) + /* + * Ensure that the whole user buffer is faulted in at sub-page + * granularity, otherwise the loop may live-lock. + */ + if (fault_in_subpage_writeable(ubuf + sk_offset, + *buf_size - sk_offset)) break; ret = btrfs_search_forward(root, &key, path, sk->min_transid); From b6ba1a89f73f11000f6b3062c4dc2503531bd3d2 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Fri, 22 Apr 2022 13:29:12 -0700 Subject: [PATCH 035/145] arm64: document the boot requirements for MTE When booting the kernel we access system registers such as GCR_EL1 if MTE is supported. These accesses are defined to trap to EL3 if SCR_EL3.ATA is disabled. 
Furthermore, tag accesses will not behave as expected if SCR_EL3.ATA is not set, or if HCR_EL2.ATA is not set and we were booted at EL1. Therefore, require that these bits are enabled when appropriate. Signed-off-by: Peter Collingbourne Reviewed-by: Mark Brown Link: https://linux-review.googlesource.com/id/Iadcfd4dcd9ba3279b2813970b44d7485b0116709 Link: https://lore.kernel.org/r/20220422202912.292039-1-pcc@google.com Signed-off-by: Catalin Marinas --- Documentation/arm64/booting.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst index 29884b261aa9..8aefa1001ae5 100644 --- a/Documentation/arm64/booting.rst +++ b/Documentation/arm64/booting.rst @@ -350,6 +350,16 @@ Before jumping into the kernel, the following conditions must be met: - SMCR_EL2.FA64 (bit 31) must be initialised to 0b1. + For CPUs with the Memory Tagging Extension feature (FEAT_MTE2): + + - If EL3 is present: + + - SCR_EL3.ATA (bit 26) must be initialised to 0b1. + + - If the kernel is entered at EL1 and EL2 is present: + + - HCR_EL2.ATA (bit 56) must be initialised to 0b1. + The requirements described above for CPU mode, caches, MMUs, architected timers, coherency and system registers apply to all CPUs. All CPUs must enter the kernel in the same exception level. Where the values documented From 8a58bcd00e2e8d46afce468adc09fcd7968f514c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 27 Apr 2022 14:08:28 +0100 Subject: [PATCH 036/145] arm64/sme: Add ID_AA64SMFR0_EL1 to __read_sysreg_by_encoding() We need to explicitly enumerate all the ID registers which we rely on for CPU capabilities in __read_sysreg_by_encoding(), ID_AA64SMFR0_EL1 was missed from this list so we trip a BUG() in paths which rely on that function such as CPU hotplug. Add the register. Reported-by: Marek Szyprowski Signed-off-by: Mark Brown Tested-by: Marek Szyprowski Link: https://lore.kernel.org/r/20220427130828.162615-1-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 082b3f48cbfd..619324b8bcef 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1336,6 +1336,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id) read_sysreg_case(SYS_ID_AA64PFR0_EL1); read_sysreg_case(SYS_ID_AA64PFR1_EL1); read_sysreg_case(SYS_ID_AA64ZFR0_EL1); + read_sysreg_case(SYS_ID_AA64SMFR0_EL1); read_sysreg_case(SYS_ID_AA64DFR0_EL1); read_sysreg_case(SYS_ID_AA64DFR1_EL1); read_sysreg_case(SYS_ID_AA64MMFR0_EL1); From 854f856f7ee35d26cdfd26e4eb3f293cc8cd8d12 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 4 Apr 2022 10:06:11 +0100 Subject: [PATCH 037/145] kselftest/arm64: Fix comment for ptrace_sve_get_fpsimd_data() The comment for ptrace_sve_get_fpsimd_data() doesn't describe what the test does at all, fix that. 
Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220404090613.181272-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/sve-ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c index 4c418b2021e0..7682798adbba 100644 --- a/tools/testing/selftests/arm64/fp/sve-ptrace.c +++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c @@ -395,7 +395,7 @@ out: free(write_buf); } -/* Validate attempting to set SVE data and read SVE data */ +/* Validate attempting to set SVE data and read it via the FPSIMD regset */ static void ptrace_set_sve_get_fpsimd_data(pid_t child, const struct vec_type *type, unsigned int vl) From 1fb1e285b4a8a3664897c34414787ea825124cb2 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 4 Apr 2022 10:06:12 +0100 Subject: [PATCH 038/145] kselftest/arm64: Remove assumption that tasks start FPSIMD only Currently the sve-ptrace test for setting and reading FPSIMD data assumes that the child will start off in FPSIMD only mode and that it can use this to read some FPSIMD mode SVE ptrace data, skipping the test if it can't. This isn't an assumption guaranteed by the ABI and also limits how we can use this testcase within the program. Instead skip the initial read and just generate a FPSIMD format buffer for the write part of the test, making the coverage more robust in the face of future kernel and test program changes. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220404090613.181272-3-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/sve-ptrace.c | 39 ++++++++----------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c index 7682798adbba..8f6146d89ca4 100644 --- a/tools/testing/selftests/arm64/fp/sve-ptrace.c +++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c @@ -46,7 +46,7 @@ static const struct vec_type vec_types[] = { #define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3) #define FLAG_TESTS 2 -#define FPSIMD_TESTS 3 +#define FPSIMD_TESTS 2 #define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types)) @@ -240,28 +240,24 @@ static void check_u32(unsigned int vl, const char *reg, /* Access the FPSIMD registers via the SVE regset */ static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type) { - void *svebuf = NULL; - size_t svebufsz = 0; + void *svebuf; struct user_sve_header *sve; struct user_fpsimd_state *fpsimd, new_fpsimd; unsigned int i, j; unsigned char *p; + int ret; - /* New process should start with FPSIMD registers only */ - sve = get_sve(child, type, &svebuf, &svebufsz); - if (!sve) { - ksft_test_result_fail("get_sve(%s): %s\n", - type->name, strerror(errno)); - + svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD)); + if (!svebuf) { + ksft_test_result_fail("Failed to allocate FPSIMD buffer\n"); return; - } else { - ksft_test_result_pass("get_sve(%s FPSIMD)\n", type->name); } - ksft_test_result((sve->flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD, - "Got FPSIMD registers via %s\n", type->name); - if ((sve->flags & SVE_PT_REGS_MASK) != SVE_PT_REGS_FPSIMD) - goto out; + memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD)); + sve = svebuf; + sve->flags = SVE_PT_REGS_FPSIMD; + sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD); + sve->vl = 16; /* We don't care what the VL is */ 
/* Try to set a known FPSIMD state via PT_REGS_SVE */ fpsimd = (struct user_fpsimd_state *)((char *)sve + @@ -273,12 +269,11 @@ static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type) p[j] = j; } - if (set_sve(child, type, sve)) { - ksft_test_result_fail("set_sve(%s FPSIMD): %s\n", - type->name, strerror(errno)); - + ret = set_sve(child, type, sve); + ksft_test_result(ret == 0, "%s FPSIMD set via SVE: %d\n", + type->name, ret); + if (ret) goto out; - } /* Verify via the FPSIMD regset */ if (get_fpsimd(child, &new_fpsimd)) { @@ -548,11 +543,9 @@ static int do_parent(pid_t child) if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) { ptrace_sve_fpsimd(child, &vec_types[i]); } else { - ksft_test_result_skip("%s FPSIMD get via SVE\n", - vec_types[i].name); ksft_test_result_skip("%s FPSIMD set via SVE\n", vec_types[i].name); - ksft_test_result_skip("%s set read via FPSIMD\n", + ksft_test_result_skip("%s FPSIMD read\n", vec_types[i].name); } From 82f97bcd876a6b5f764726a5210bde638d9f4d0a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 4 Apr 2022 10:06:13 +0100 Subject: [PATCH 039/145] kselftest/arm64: Validate setting via FPSIMD and read via SVE regsets Currently we validate that we can set the floating point state via the SVE regset and read the data via the FPSIMD regset but we do not valiate that the opposite case works as expected. Add a test that covers this case, noting that when reading via SVE regset the kernel has the option of returning either SVE or FPSIMD data so we need to accept both formats. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220404090613.181272-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/sve-ptrace.c | 123 +++++++++++++++++- 1 file changed, 122 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c index 8f6146d89ca4..36b6f0749f23 100644 --- a/tools/testing/selftests/arm64/fp/sve-ptrace.c +++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c @@ -44,7 +44,7 @@ static const struct vec_type vec_types[] = { }, }; -#define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3) +#define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 4) #define FLAG_TESTS 2 #define FPSIMD_TESTS 2 @@ -78,6 +78,15 @@ static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd) return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov); } +static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd) +{ + struct iovec iov; + + iov.iov_base = fpsimd; + iov.iov_len = sizeof(*fpsimd); + return ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov); +} + static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type, void **buf, size_t *size) { @@ -473,6 +482,115 @@ out: free(write_buf); } +/* Validate attempting to set FPSIMD data and read it via the SVE regset */ +static void ptrace_set_fpsimd_get_sve_data(pid_t child, + const struct vec_type *type, + unsigned int vl) +{ + void *read_buf = NULL; + unsigned char *p; + struct user_sve_header *read_sve; + unsigned int vq = sve_vq_from_vl(vl); + struct user_fpsimd_state write_fpsimd; + int ret, i, j; + size_t read_sve_size = 0; + size_t expected_size; + int errors = 0; + + if (__BYTE_ORDER == __BIG_ENDIAN) { + ksft_test_result_skip("Big endian not supported\n"); + return; + } + + for (i = 0; i < 32; ++i) { + p = (unsigned char *)&write_fpsimd.vregs[i]; + + for (j = 0; j < sizeof(write_fpsimd.vregs[i]); ++j) + p[j] = j; + } + + ret = set_fpsimd(child, 
&write_fpsimd); + if (ret != 0) { + ksft_test_result_fail("Failed to set FPSIMD state: %d\n)", + ret); + return; + } + + if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) { + ksft_test_result_fail("Failed to read %s VL %u data\n", + type->name, vl); + return; + } + read_sve = read_buf; + + if (read_sve->vl != vl) { + ksft_test_result_fail("Child VL != expected VL %d\n", + read_sve->vl, vl); + goto out; + } + + /* The kernel may return either SVE or FPSIMD format */ + switch (read_sve->flags & SVE_PT_REGS_MASK) { + case SVE_PT_REGS_FPSIMD: + expected_size = SVE_PT_FPSIMD_SIZE(vq, SVE_PT_REGS_FPSIMD); + if (read_sve_size < expected_size) { + ksft_test_result_fail("Read %d bytes, expected %d\n", + read_sve_size, expected_size); + goto out; + } + + ret = memcmp(&write_fpsimd, read_buf + SVE_PT_FPSIMD_OFFSET, + sizeof(write_fpsimd)); + if (ret != 0) { + ksft_print_msg("Read FPSIMD data mismatch\n"); + errors++; + } + break; + + case SVE_PT_REGS_SVE: + expected_size = SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE); + if (read_sve_size < expected_size) { + ksft_test_result_fail("Read %d bytes, expected %d\n", + read_sve_size, expected_size); + goto out; + } + + for (i = 0; i < __SVE_NUM_ZREGS; i++) { + __uint128_t tmp = 0; + + /* + * Z regs are stored endianness invariant, this won't + * work for big endian + */ + memcpy(&tmp, read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i), + sizeof(tmp)); + + if (tmp != write_fpsimd.vregs[i]) { + ksft_print_msg("Mismatch in FPSIMD for %s VL %u Z%d/V%d\n", + type->name, vl, i, i); + errors++; + } + } + + check_u32(vl, "FPSR", &write_fpsimd.fpsr, + read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors); + check_u32(vl, "FPCR", &write_fpsimd.fpcr, + read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors); + break; + default: + ksft_print_msg("Unexpected regs type %d\n", + read_sve->flags & SVE_PT_REGS_MASK); + errors++; + break; + } + + ksft_test_result(errors == 0, "Set FPSIMD, read via SVE for %s VL %u\n", + type->name, vl); + +out: + free(read_buf); +} + static int do_parent(pid_t child) { int ret = EXIT_FAILURE; @@ -578,11 +696,14 @@ static int do_parent(pid_t child) if (vl_supported) { ptrace_set_sve_get_sve_data(child, &vec_types[i], vl); ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl); + ptrace_set_fpsimd_get_sve_data(child, &vec_types[i], vl); } else { ksft_test_result_skip("%s set SVE get SVE for VL %d\n", vec_types[i].name, vl); ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n", vec_types[i].name, vl); + ksft_test_result_skip("%s set FPSIMD get SVE for VL %d\n", + vec_types[i].name, vl); } } } From 3f374d7972c48bc0824bdabb8f94fe82e54fd07d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 11:32:40 +0100 Subject: [PATCH 040/145] kselftest/arm64: Handle more kselftest result codes in MTE helpers The MTE selftests have a helper evaluate_test() which translates a return code into a call to ksft_test_result_*(). Currently this only handles pass and fail, silently ignoring any other code. Update the helper to support skipped tests and log any unknown return codes as an error so we get at least some diagnostic if anything goes wrong. 
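As a hypothetical illustration (not part of this patch; check_needs_mte()
is an invented example assuming the usual selftest includes of
<sys/auxv.h>, <asm/hwcap.h> and kselftest.h), a check that cannot run on
the current system can now simply return KSFT_SKIP and have it reported
rather than silently dropped:

static int check_needs_mte(void)
{
	if (!(getauxval(AT_HWCAP2) & HWCAP2_MTE))
		return KSFT_SKIP;	/* now reported via ksft_test_result_skip() */

	/* ... exercise the feature under test ... */
	return KSFT_PASS;
}

A call such as evaluate_test(check_needs_mte(), "example MTE check\n")
then reports pass, fail or skip as appropriate.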
Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220419103243.24774-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- .../testing/selftests/arm64/mte/mte_common_util.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.h b/tools/testing/selftests/arm64/mte/mte_common_util.h index 195a7d1879e6..2d3e71724e55 100644 --- a/tools/testing/selftests/arm64/mte/mte_common_util.h +++ b/tools/testing/selftests/arm64/mte/mte_common_util.h @@ -75,10 +75,21 @@ unsigned int mte_get_pstate_tco(void); /* Test framework static inline functions/macros */ static inline void evaluate_test(int err, const char *msg) { - if (err == KSFT_PASS) + switch (err) { + case KSFT_PASS: ksft_test_result_pass(msg); - else if (err == KSFT_FAIL) + break; + case KSFT_FAIL: ksft_test_result_fail(msg); + break; + case KSFT_SKIP: + ksft_test_result_skip(msg); + break; + default: + ksft_test_result_error("Unknown return code %d from %s", + err, msg); + break; + } } static inline int check_allocated_memory(void *ptr, size_t size, From 191e678bdc9be2447dae227f5b6ea1e995c5ee9c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 11:32:41 +0100 Subject: [PATCH 041/145] kselftest/arm64: Log unexpected asynchronous MTE faults Help people figure out problems by printing a diagnostic when we get an unexpected asynchronous fault. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220419103243.24774-3-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/mte/mte_common_util.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c index 0328a1e08f65..5327aa958171 100644 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c @@ -37,6 +37,10 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc) if (si->si_code == SEGV_MTEAERR) { if (cur_mte_cxt.trig_si_code == si->si_code) cur_mte_cxt.fault_valid = true; + else + ksft_print_msg("Got unexpected SEGV_MTEAERR at pc=$lx, fault addr=%lx\n", + ((ucontext_t *)uc)->uc_mcontext.pc, + addr); return; } /* Compare the context for precise error */ From f326c9a6f49b06c0a936d68ae23cb90899835c3b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 11:32:42 +0100 Subject: [PATCH 042/145] kselftest/arm64: Refactor parameter checking in mte_switch_mode() Currently we just have a big if statement with a non-specific diagnostic checking both the mode and the tag. Since we'll need to dynamically check for asymmetric mode support in the system and to improve debugability split these checks out. 
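For example (an illustrative call, not taken from this patch), a caller
selecting asynchronous faults with the full inclusion mask now gets a
separate diagnostic if either argument is out of range:

	if (mte_switch_mode(MTE_ASYNC_ERR, MTE_ALLOW_NON_ZERO_TAG) != 0)
		return KSFT_FAIL;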
Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220419103243.24774-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- .../testing/selftests/arm64/mte/mte_common_util.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c index 5327aa958171..260206f4dce0 100644 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c @@ -273,9 +273,18 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask) { unsigned long en = 0; - if (!(mte_option == MTE_SYNC_ERR || mte_option == MTE_ASYNC_ERR || - mte_option == MTE_NONE_ERR || incl_mask <= MTE_ALLOW_NON_ZERO_TAG)) { - ksft_print_msg("FAIL: Invalid mte config option\n"); + switch (mte_option) { + case MTE_NONE_ERR: + case MTE_SYNC_ERR: + case MTE_ASYNC_ERR: + break; + default: + ksft_print_msg("FAIL: Invalid MTE option %x\n", mte_option); + return -EINVAL; + } + + if (!(incl_mask <= MTE_ALLOW_NON_ZERO_TAG)) { + ksft_print_msg("FAIL: Invalid incl_mask %lx\n", incl_mask); return -EINVAL; } en = PR_TAGGED_ADDR_ENABLE; From e2d9642a5a5101a559e7d368a1df8e01e960096b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 11:32:43 +0100 Subject: [PATCH 043/145] kselftest/arm64: Add simple test for MTE prctl The current tests use the prctls for various things but there's no coverage of the edges of the interface so add some basics. This isn't hugely useful as it is (it originally had some coverage for the combinations with asymmetric mode but we removed the prctl() for that) but it might be a helpful starting point for future work, for example covering error handling. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220419103243.24774-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/mte/.gitignore | 1 + .../testing/selftests/arm64/mte/check_prctl.c | 119 ++++++++++++++++++ 2 files changed, 120 insertions(+) create mode 100644 tools/testing/selftests/arm64/mte/check_prctl.c diff --git a/tools/testing/selftests/arm64/mte/.gitignore b/tools/testing/selftests/arm64/mte/.gitignore index d1fe4ddf1669..052d0f9f92b3 100644 --- a/tools/testing/selftests/arm64/mte/.gitignore +++ b/tools/testing/selftests/arm64/mte/.gitignore @@ -3,5 +3,6 @@ check_gcr_el1_cswitch check_tags_inclusion check_child_memory check_mmap_options +check_prctl check_ksm_options check_user_mem diff --git a/tools/testing/selftests/arm64/mte/check_prctl.c b/tools/testing/selftests/arm64/mte/check_prctl.c new file mode 100644 index 000000000000..f139a33a43ef --- /dev/null +++ b/tools/testing/selftests/arm64/mte/check_prctl.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022 ARM Limited + +#include +#include +#include + +#include +#include + +#include + +#include "kselftest.h" + +static int set_tagged_addr_ctrl(int val) +{ + int ret; + + ret = prctl(PR_SET_TAGGED_ADDR_CTRL, val, 0, 0, 0); + if (ret < 0) + ksft_print_msg("PR_SET_TAGGED_ADDR_CTRL: failed %d %d (%s)\n", + ret, errno, strerror(errno)); + return ret; +} + +static int get_tagged_addr_ctrl(void) +{ + int ret; + + ret = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0); + if (ret < 0) + ksft_print_msg("PR_GET_TAGGED_ADDR_CTRL failed: %d %d (%s)\n", + ret, errno, strerror(errno)); + return ret; +} + +/* + * Read the current mode without having done any configuration, should + * run first. 
+ */ +void check_basic_read(void) +{ + int ret; + + ret = get_tagged_addr_ctrl(); + if (ret < 0) { + ksft_test_result_fail("check_basic_read\n"); + return; + } + + if (ret & PR_MTE_TCF_SYNC) + ksft_print_msg("SYNC enabled\n"); + if (ret & PR_MTE_TCF_ASYNC) + ksft_print_msg("ASYNC enabled\n"); + + /* Any configuration is valid */ + ksft_test_result_pass("check_basic_read\n"); +} + +/* + * Attempt to set a specified combination of modes. + */ +void set_mode_test(const char *name, int hwcap2, int mask) +{ + int ret; + + if ((getauxval(AT_HWCAP2) & hwcap2) != hwcap2) { + ksft_test_result_skip("%s\n", name); + return; + } + + ret = set_tagged_addr_ctrl(mask); + if (ret < 0) { + ksft_test_result_fail("%s\n", name); + return; + } + + ret = get_tagged_addr_ctrl(); + if (ret < 0) { + ksft_test_result_fail("%s\n", name); + return; + } + + if ((ret & PR_MTE_TCF_MASK) == mask) { + ksft_test_result_pass("%s\n", name); + } else { + ksft_print_msg("Got %x, expected %x\n", + (ret & PR_MTE_TCF_MASK), mask); + ksft_test_result_fail("%s\n", name); + } +} + +struct mte_mode { + int mask; + int hwcap2; + const char *name; +} mte_modes[] = { + { PR_MTE_TCF_NONE, 0, "NONE" }, + { PR_MTE_TCF_SYNC, HWCAP2_MTE, "SYNC" }, + { PR_MTE_TCF_ASYNC, HWCAP2_MTE, "ASYNC" }, + { PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC, HWCAP2_MTE, "SYNC+ASYNC" }, +}; + +int main(void) +{ + int i; + + ksft_print_header(); + ksft_set_plan(5); + + check_basic_read(); + for (i = 0; i < ARRAY_SIZE(mte_modes); i++) + set_mode_test(mte_modes[i].name, mte_modes[i].hwcap2, + mte_modes[i].mask); + + ksft_print_cnts(); + + return 0; +} From 6d51b18865c65390973e6ed0aec20239cf475489 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:36 +0100 Subject: [PATCH 044/145] kselftest/arm64: Add manual encodings for SME instructions As for the kernel so that we don't have ambitious toolchain requirements to build the tests manually encode some of the SVE instructions. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-29-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/sme-inst.h | 51 +++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 tools/testing/selftests/arm64/fp/sme-inst.h diff --git a/tools/testing/selftests/arm64/fp/sme-inst.h b/tools/testing/selftests/arm64/fp/sme-inst.h new file mode 100644 index 000000000000..7191e53ca1c0 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/sme-inst.h @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2021-2 ARM Limited. 
+// Original author: Mark Brown + +#ifndef SME_INST_H +#define SME_INST_H + +/* + * RDSVL X\nx, #\imm + */ +.macro rdsvl nx, imm + .inst 0x4bf5800 \ + | (\imm << 5) \ + | (\nx) +.endm + +.macro smstop + msr S0_3_C4_C6_3, xzr +.endm + +.macro smstart_za + msr S0_3_C4_C5_3, xzr +.endm + +.macro smstart_sm + msr S0_3_C4_C3_3, xzr +.endm + +/* + * LDR (vector to ZA array): + * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _ldr_za nw, nxbase, offset=0 + .inst 0xe1000000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + +/* + * STR (vector from ZA array): + * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _str_za nw, nxbase, offset=0 + .inst 0xe1200000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + +#endif From e8c4451480d0cb37cbc69160113b1f4ff211cd16 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:37 +0100 Subject: [PATCH 045/145] kselftest/arm64: sme: Add SME support to vlset The Scalable Matrix Extenions (SME) introduces additional register state with configurable vector lengths, similar to SVE but configured separately. Extend vlset to support configuring this state with a --sme or -s command line option. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-30-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/vlset.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/arm64/fp/vlset.c b/tools/testing/selftests/arm64/fp/vlset.c index 308d27a68226..76912a581a95 100644 --- a/tools/testing/selftests/arm64/fp/vlset.c +++ b/tools/testing/selftests/arm64/fp/vlset.c @@ -22,12 +22,15 @@ static int inherit = 0; static int no_inherit = 0; static int force = 0; static unsigned long vl; +static int set_ctl = PR_SVE_SET_VL; +static int get_ctl = PR_SVE_GET_VL; static const struct option options[] = { { "force", no_argument, NULL, 'f' }, { "inherit", no_argument, NULL, 'i' }, { "max", no_argument, NULL, 'M' }, { "no-inherit", no_argument, &no_inherit, 1 }, + { "sme", no_argument, NULL, 's' }, { "help", no_argument, NULL, '?' }, {} }; @@ -50,6 +53,9 @@ static int parse_options(int argc, char **argv) case 'M': vl = SVE_VL_MAX; break; case 'f': force = 1; break; case 'i': inherit = 1; break; + case 's': set_ctl = PR_SME_SET_VL; + get_ctl = PR_SME_GET_VL; + break; case 0: break; default: goto error; } @@ -125,14 +131,14 @@ int main(int argc, char **argv) if (inherit) flags |= PR_SVE_VL_INHERIT; - t = prctl(PR_SVE_SET_VL, vl | flags); + t = prctl(set_ctl, vl | flags); if (t < 0) { fprintf(stderr, "%s: PR_SVE_SET_VL: %s\n", program_name, strerror(errno)); goto error; } - t = prctl(PR_SVE_GET_VL); + t = prctl(get_ctl); if (t == -1) { fprintf(stderr, "%s: PR_SVE_GET_VL: %s\n", program_name, strerror(errno)); From 30e3a42b5d47d6dadba73a8509a6687a9d8f8e40 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:38 +0100 Subject: [PATCH 046/145] kselftest/arm64: Add tests for TPIDR2 The Scalable Matrix Extension adds a new system register TPIDR2 intended to be used by libc for its own thread specific use, add some kselftests which exercise the ABI for it. 
Since this test should, with some adjustment, also work for TPIDR and any other similar registers added in future, add the test in a separate directory rather than placing it with the other floating point tests; nothing existing looked suitable so I created a new test directory called "abi". Since this feature is intended to be used by libc the test is built as freestanding code using nolibc so we don't end up with the test program and libc both trying to manage the register simultaneously and disrupting each other. As a result of being written using nolibc we check for the default SME vector length configuration in /proc rather than using hwcaps to identify if SME is available in the system; adding hwcap support to nolibc seems like disproportionate effort and didn't feel entirely idiomatic for what nolibc is trying to do. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-31-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/abi/.gitignore | 1 + tools/testing/selftests/arm64/abi/Makefile | 9 +- tools/testing/selftests/arm64/abi/tpidr2.c | 298 +++++++++++++++++++ 3 files changed, 307 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/arm64/abi/tpidr2.c diff --git a/tools/testing/selftests/arm64/abi/.gitignore b/tools/testing/selftests/arm64/abi/.gitignore index b79cf5814c23..b9e54417250d 100644 --- a/tools/testing/selftests/arm64/abi/.gitignore +++ b/tools/testing/selftests/arm64/abi/.gitignore @@ -1 +1,2 @@ syscall-abi +tpidr2 diff --git a/tools/testing/selftests/arm64/abi/Makefile b/tools/testing/selftests/arm64/abi/Makefile index 96eba974ac8d..c8d7f2495eb2 100644 --- a/tools/testing/selftests/arm64/abi/Makefile +++ b/tools/testing/selftests/arm64/abi/Makefile @@ -1,8 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (C) 2021 ARM Limited -TEST_GEN_PROGS := syscall-abi +TEST_GEN_PROGS := syscall-abi tpidr2 include ../../lib.mk $(OUTPUT)/syscall-abi: syscall-abi.c syscall-abi-asm.S + +# Build with nolibc since TPIDR2 is intended to be actively managed by +# libc and we're trying to test the functionality that it depends on here.
+$(OUTPUT)/tpidr2: tpidr2.c + $(CC) -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ + -static -include ../../../../include/nolibc/nolibc.h \ + -ffreestanding -Wall $^ -o $@ -lgcc diff --git a/tools/testing/selftests/arm64/abi/tpidr2.c b/tools/testing/selftests/arm64/abi/tpidr2.c new file mode 100644 index 000000000000..351a098b503a --- /dev/null +++ b/tools/testing/selftests/arm64/abi/tpidr2.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#define SYS_TPIDR2 "S3_3_C13_C0_5" + +#define EXPECTED_TESTS 5 + +static void putstr(const char *str) +{ + write(1, str, strlen(str)); +} + +static void putnum(unsigned int num) +{ + char c; + + if (num / 10) + putnum(num / 10); + + c = '0' + (num % 10); + write(1, &c, 1); +} + +static int tests_run; +static int tests_passed; +static int tests_failed; +static int tests_skipped; + +static void set_tpidr2(uint64_t val) +{ + asm volatile ( + "msr " SYS_TPIDR2 ", %0\n" + : + : "r"(val) + : "cc"); +} + +static uint64_t get_tpidr2(void) +{ + uint64_t val; + + asm volatile ( + "mrs %0, " SYS_TPIDR2 "\n" + : "=r"(val) + : + : "cc"); + + return val; +} + +static void print_summary(void) +{ + if (tests_passed + tests_failed + tests_skipped != EXPECTED_TESTS) + putstr("# UNEXPECTED TEST COUNT: "); + + putstr("# Totals: pass:"); + putnum(tests_passed); + putstr(" fail:"); + putnum(tests_failed); + putstr(" xfail:0 xpass:0 skip:"); + putnum(tests_skipped); + putstr(" error:0\n"); +} + +/* Processes should start with TPIDR2 == 0 */ +static int default_value(void) +{ + return get_tpidr2() == 0; +} + +/* If we set TPIDR2 we should read that value */ +static int write_read(void) +{ + set_tpidr2(getpid()); + + return getpid() == get_tpidr2(); +} + +/* If we set a value we should read the same value after scheduling out */ +static int write_sleep_read(void) +{ + set_tpidr2(getpid()); + + msleep(100); + + return getpid() == get_tpidr2(); +} + +/* + * If we fork the value in the parent should be unchanged and the + * child should start with the same value and be able to set its own + * value. + */ +static int write_fork_read(void) +{ + pid_t newpid, waiting, oldpid; + int status; + + set_tpidr2(getpid()); + + oldpid = getpid(); + newpid = fork(); + if (newpid == 0) { + /* In child */ + if (get_tpidr2() != oldpid) { + putstr("# TPIDR2 changed in child: "); + putnum(get_tpidr2()); + putstr("\n"); + exit(0); + } + + set_tpidr2(getpid()); + if (get_tpidr2() == getpid()) { + exit(1); + } else { + putstr("# Failed to set TPIDR2 in child\n"); + exit(0); + } + } + if (newpid < 0) { + putstr("# fork() failed: -"); + putnum(-newpid); + putstr("\n"); + return 0; + } + + for (;;) { + waiting = waitpid(newpid, &status, 0); + + if (waiting < 0) { + if (errno == EINTR) + continue; + putstr("# waitpid() failed: "); + putnum(errno); + putstr("\n"); + return 0; + } + if (waiting != newpid) { + putstr("# waitpid() returned wrong PID\n"); + return 0; + } + + if (!WIFEXITED(status)) { + putstr("# child did not exit\n"); + return 0; + } + + if (getpid() != get_tpidr2()) { + putstr("# TPIDR2 corrupted in parent\n"); + return 0; + } + + return WEXITSTATUS(status); + } +} + +/* + * sys_clone() has a lot of per architecture variation so just define + * it here rather than adding it to nolibc, plus the raw API is a + * little more convenient for this test. 
+ */ +static int sys_clone(unsigned long clone_flags, unsigned long newsp, + int *parent_tidptr, unsigned long tls, + int *child_tidptr) +{ + return my_syscall5(__NR_clone, clone_flags, newsp, parent_tidptr, tls, + child_tidptr); +} + +/* + * If we clone with CLONE_SETTLS then the value in the parent should + * be unchanged and the child should start with zero and be able to + * set its own value. + */ +static int write_clone_read(void) +{ + int parent_tid, child_tid; + pid_t parent, waiting; + int ret, status; + + parent = getpid(); + set_tpidr2(parent); + + ret = sys_clone(CLONE_SETTLS, 0, &parent_tid, 0, &child_tid); + if (ret == -1) { + putstr("# clone() failed\n"); + putnum(errno); + putstr("\n"); + return 0; + } + + if (ret == 0) { + /* In child */ + if (get_tpidr2() != 0) { + putstr("# TPIDR2 non-zero in child: "); + putnum(get_tpidr2()); + putstr("\n"); + exit(0); + } + + if (gettid() == 0) + putstr("# Child TID==0\n"); + set_tpidr2(gettid()); + if (get_tpidr2() == gettid()) { + exit(1); + } else { + putstr("# Failed to set TPIDR2 in child\n"); + exit(0); + } + } + + for (;;) { + waiting = wait4(ret, &status, __WCLONE, NULL); + + if (waiting < 0) { + if (errno == EINTR) + continue; + putstr("# wait4() failed: "); + putnum(errno); + putstr("\n"); + return 0; + } + if (waiting != ret) { + putstr("# wait4() returned wrong PID "); + putnum(waiting); + putstr("\n"); + return 0; + } + + if (!WIFEXITED(status)) { + putstr("# child did not exit\n"); + return 0; + } + + if (parent != get_tpidr2()) { + putstr("# TPIDR2 corrupted in parent\n"); + return 0; + } + + return WEXITSTATUS(status); + } +} + +#define run_test(name) \ + if (name()) { \ + tests_passed++; \ + } else { \ + tests_failed++; \ + putstr("not "); \ + } \ + putstr("ok "); \ + putnum(++tests_run); \ + putstr(" " #name "\n"); + +int main(int argc, char **argv) +{ + int ret, i; + + putstr("TAP version 13\n"); + putstr("1.."); + putnum(EXPECTED_TESTS); + putstr("\n"); + + putstr("# PID: "); + putnum(getpid()); + putstr("\n"); + + /* + * This test is run with nolibc which doesn't support hwcap and + * it's probably disproportionate to implement so instead check + * for the default vector length configuration in /proc. + */ + ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0); + if (ret >= 0) { + run_test(default_value); + run_test(write_read); + run_test(write_sleep_read); + run_test(write_fork_read); + run_test(write_clone_read); + + } else { + putstr("# SME support not present\n"); + + for (i = 0; i < EXPECTED_TESTS; i++) { + putstr("ok "); + putnum(i); + putstr(" skipped, TPIDR2 not supported\n"); + } + + tests_skipped += EXPECTED_TESTS; + } + + print_summary(); + + return 0; +} From a0f2eb641b7c4ff753374f8b2043b8bbb1666a96 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:39 +0100 Subject: [PATCH 047/145] kselftest/arm64: Extend vector configuration API tests to cover SME Provide RDVL helpers for SME and extend the main vector configuration tests to cover SME. 
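For reference, the prctl() view of the SME vector length that vec-syscfg cross-checks against the RDSVL-based helper can be read with a small standalone program along the following lines. This is only an illustrative sketch, not part of the patch, and assumes kernel uapi headers recent enough to provide HWCAP2_SME, PR_SME_GET_VL and PR_SME_VL_LEN_MASK:

#include <stdio.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>

int main(void)
{
	long vl;

	if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) {
		printf("SME not supported\n");
		return 1;
	}

	vl = prctl(PR_SME_GET_VL);
	if (vl == -1) {
		perror("PR_SME_GET_VL");
		return 1;
	}

	/* The low 16 bits hold the vector length in bytes */
	printf("SME VL: %ld bytes\n", vl & PR_SME_VL_LEN_MASK);

	return 0;
}

vec-syscfg then checks that this prctl() view agrees with what the rdvl-sme binary reports via RDSVL.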
Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-32-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/.gitignore | 1 + tools/testing/selftests/arm64/fp/Makefile | 3 ++- tools/testing/selftests/arm64/fp/rdvl-sme.c | 14 ++++++++++++++ tools/testing/selftests/arm64/fp/rdvl.S | 10 ++++++++++ tools/testing/selftests/arm64/fp/rdvl.h | 1 + tools/testing/selftests/arm64/fp/vec-syscfg.c | 10 ++++++++++ 6 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/arm64/fp/rdvl-sme.c diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore index c50d86331ed2..6e9a610c5e5d 100644 --- a/tools/testing/selftests/arm64/fp/.gitignore +++ b/tools/testing/selftests/arm64/fp/.gitignore @@ -1,5 +1,6 @@ fp-pidbench fpsimd-test +rdvl-sme rdvl-sve sve-probe-vls sve-ptrace diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index 95f0b877a060..a224fff8082b 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -3,7 +3,7 @@ CFLAGS += -I../../../../../usr/include/ TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \ - rdvl-sve \ + rdvl-sme rdvl-sve \ sve-test sve-stress \ vlset @@ -13,6 +13,7 @@ fp-pidbench: fp-pidbench.S asm-utils.o $(CC) -nostdlib $^ -o $@ fpsimd-test: fpsimd-test.o asm-utils.o $(CC) -nostdlib $^ -o $@ +rdvl-sme: rdvl-sme.o rdvl.o rdvl-sve: rdvl-sve.o rdvl.o sve-ptrace: sve-ptrace.o sve-probe-vls: sve-probe-vls.o rdvl.o diff --git a/tools/testing/selftests/arm64/fp/rdvl-sme.c b/tools/testing/selftests/arm64/fp/rdvl-sme.c new file mode 100644 index 000000000000..49b0b2e08bac --- /dev/null +++ b/tools/testing/selftests/arm64/fp/rdvl-sme.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include + +#include "rdvl.h" + +int main(void) +{ + int vl = rdvl_sme(); + + printf("%d\n", vl); + + return 0; +} diff --git a/tools/testing/selftests/arm64/fp/rdvl.S b/tools/testing/selftests/arm64/fp/rdvl.S index c916c1c9defd..20dc29996dc6 100644 --- a/tools/testing/selftests/arm64/fp/rdvl.S +++ b/tools/testing/selftests/arm64/fp/rdvl.S @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only // Copyright (C) 2021 ARM Limited. 
+#include "sme-inst.h" + .arch_extension sve .globl rdvl_sve @@ -8,3 +10,11 @@ rdvl_sve: hint 34 // BTI C rdvl x0, #1 ret + +.globl rdvl_sme +rdvl_sme: + hint 34 // BTI C + + rdsvl 0, 1 + + ret diff --git a/tools/testing/selftests/arm64/fp/rdvl.h b/tools/testing/selftests/arm64/fp/rdvl.h index 7c9d953fc9e7..5d323679fbc9 100644 --- a/tools/testing/selftests/arm64/fp/rdvl.h +++ b/tools/testing/selftests/arm64/fp/rdvl.h @@ -3,6 +3,7 @@ #ifndef RDVL_H #define RDVL_H +int rdvl_sme(void); int rdvl_sve(void); #endif diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c index c90658811a83..9bcfcdc34ee9 100644 --- a/tools/testing/selftests/arm64/fp/vec-syscfg.c +++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c @@ -51,6 +51,16 @@ static struct vec_data vec_data[] = { .prctl_set = PR_SVE_SET_VL, .default_vl_file = "/proc/sys/abi/sve_default_vector_length", }, + { + .name = "SME", + .hwcap_type = AT_HWCAP2, + .hwcap = HWCAP2_SME, + .rdvl = rdvl_sme, + .rdvl_binary = "./rdvl-sme", + .prctl_get = PR_SME_GET_VL, + .prctl_set = PR_SME_SET_VL, + .default_vl_file = "/proc/sys/abi/sme_default_vector_length", + }, }; static int stdio_read_integer(FILE *f, const char *what, int *val) From 4126bde025c8f973dfd278879fa32e293f563df5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:40 +0100 Subject: [PATCH 048/145] kselftest/arm64: sme: Provide streaming mode SVE stress test One of the features of SME is the addition of streaming mode, in which we have access to a set of streaming mode SVE registers at the SME vector length. Since these are accessed using the SVE instructions let's reuse the existing SVE stress test for testing with a compile time option for controlling the few small differences needed: - Enter streaming mode immediately on starting the program. - In streaming mode FFR is removed so skip reading and writing FFR. 
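As an illustrative sketch (not part of this patch) of the streaming mode behaviour relied on here, a C program can enter streaming mode using the same manual encoding as the tests and observe via SVCR that a syscall takes it back out again. This assumes an assembler that accepts the generic S3_3_C4_C2_2 name for SVCR and uapi headers providing HWCAP2_SME:

#include <stdio.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/syscall.h>
#include <asm/hwcap.h>

/* Read SVCR via its generic encoding; bit 0 is SM, bit 1 is ZA */
static unsigned long read_svcr(void)
{
	unsigned long val;

	asm volatile("mrs %0, S3_3_C4_C2_2" : "=r"(val));

	return val;
}

int main(void)
{
	unsigned long before, after;

	if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) {
		printf("SME not supported\n");
		return 1;
	}

	asm volatile(".inst 0xd503437f");	/* SMSTART SM, manually encoded */
	before = read_svcr();
	syscall(SYS_getpid);			/* syscalls exit streaming mode */
	after = read_svcr();

	printf("SVCR after SMSTART SM: %lx, after syscall: %lx\n",
	       before, after);

	return 0;
}

Nothing that might use floating point or SIMD (such as printf()) is called while in streaming mode since, without FA64, such instructions would trap.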
Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-33-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/.gitignore | 1 + tools/testing/selftests/arm64/fp/Makefile | 3 + tools/testing/selftests/arm64/fp/ssve-stress | 59 ++++++++++++++++++++ tools/testing/selftests/arm64/fp/sve-test.S | 20 +++++++ 4 files changed, 83 insertions(+) create mode 100644 tools/testing/selftests/arm64/fp/ssve-stress diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore index 6e9a610c5e5d..5729a5b1adfc 100644 --- a/tools/testing/selftests/arm64/fp/.gitignore +++ b/tools/testing/selftests/arm64/fp/.gitignore @@ -5,5 +5,6 @@ rdvl-sve sve-probe-vls sve-ptrace sve-test +ssve-test vec-syscfg vlset diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index a224fff8082b..e6643c9b0474 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -5,6 +5,7 @@ TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \ rdvl-sme rdvl-sve \ sve-test sve-stress \ + ssve-test ssve-stress \ vlset all: $(TEST_GEN_PROGS) $(TEST_PROGS_EXTENDED) @@ -19,6 +20,8 @@ sve-ptrace: sve-ptrace.o sve-probe-vls: sve-probe-vls.o rdvl.o sve-test: sve-test.o asm-utils.o $(CC) -nostdlib $^ -o $@ +ssve-test: sve-test.S asm-utils.o + $(CC) -DSSVE -nostdlib $^ -o $@ vec-syscfg: vec-syscfg.o rdvl.o vlset: vlset.o diff --git a/tools/testing/selftests/arm64/fp/ssve-stress b/tools/testing/selftests/arm64/fp/ssve-stress new file mode 100644 index 000000000000..e2bd2cc184ad --- /dev/null +++ b/tools/testing/selftests/arm64/fp/ssve-stress @@ -0,0 +1,59 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2015-2019 ARM Limited. +# Original author: Dave Martin + +set -ue + +NR_CPUS=`nproc` + +pids= +logs= + +cleanup () { + trap - INT TERM CHLD + set +e + + if [ -n "$pids" ]; then + kill $pids + wait $pids + pids= + fi + + if [ -n "$logs" ]; then + cat $logs + rm $logs + logs= + fi +} + +interrupt () { + cleanup + exit 0 +} + +child_died () { + cleanup + exit 1 +} + +trap interrupt INT TERM EXIT + +for x in `seq 0 $((NR_CPUS * 4))`; do + log=`mktemp` + logs=$logs\ $log + ./ssve-test >$log & + pids=$pids\ $! +done + +# Wait for all child processes to be created: +sleep 10 + +while :; do + kill -USR1 $pids +done & +pids=$pids\ $! + +wait + +exit 1 diff --git a/tools/testing/selftests/arm64/fp/sve-test.S b/tools/testing/selftests/arm64/fp/sve-test.S index f5b1b48ffff2..589264231a2d 100644 --- a/tools/testing/selftests/arm64/fp/sve-test.S +++ b/tools/testing/selftests/arm64/fp/sve-test.S @@ -13,6 +13,7 @@ #include #include "assembler.h" #include "asm-offsets.h" +#include "sme-inst.h" #define NZR 32 #define NPR 16 @@ -156,6 +157,7 @@ endfunction // We fill the upper lanes of FFR with zeros. // Beware: corrupts P0. function setup_ffr +#ifndef SSVE mov x4, x30 and w0, w0, #0x3 @@ -178,6 +180,9 @@ function setup_ffr wrffr p0.b ret x4 +#else + ret +#endif endfunction // Trivial memory compare: compare x2 bytes starting at address x0 with @@ -260,6 +265,7 @@ endfunction // Beware -- corrupts P0. // Clobbers x0-x5. 
function check_ffr +#ifndef SSVE mov x3, x30 ldr x4, =scratch @@ -280,6 +286,9 @@ function check_ffr mov x2, x5 mov x30, x3 b memcmp +#else + ret +#endif endfunction // Any SVE register modified here can cause corruption in the main @@ -295,10 +304,12 @@ function irritator_handler movi v0.8b, #1 movi v9.16b, #2 movi v31.8b, #3 +#ifndef SSVE // And P0 rdffr p0.b // And FFR wrffr p15.b +#endif ret endfunction @@ -359,6 +370,11 @@ endfunction .globl _start function _start _start: +#ifdef SSVE + puts "Streaming mode " + smstart_sm +#endif + // Sanity-check and report the vector length rdvl x19, #8 @@ -407,6 +423,10 @@ _start: orr w2, w2, #SA_NODEFER bl setsignal +#ifdef SSVE + smstart_sm // syscalls will have exited streaming mode +#endif + mov x22, #0 // generation number, increments per iteration .Ltest_loop: rdvl x0, #8 From 1a792b545519b6e49f9b1653095ed785eea19afe Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:41 +0100 Subject: [PATCH 049/145] kselftest/arm64: signal: Handle ZA signal context in core code As part of the generic code for signal handling test cases we parse all signal frames to make sure they have at least the basic form we expect and that there are no unexpected frames present in the signal context. Add coverage of the ZA signal frame to this code. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-34-broonie@kernel.org Signed-off-by: Catalin Marinas --- .../arm64/signal/testcases/testcases.c | 36 +++++++++++++++++++ .../arm64/signal/testcases/testcases.h | 3 +- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.c b/tools/testing/selftests/arm64/signal/testcases/testcases.c index 8c2a57fc2f9c..84c36bee4d82 100644 --- a/tools/testing/selftests/arm64/signal/testcases/testcases.c +++ b/tools/testing/selftests/arm64/signal/testcases/testcases.c @@ -75,6 +75,31 @@ bool validate_sve_context(struct sve_context *sve, char **err) return true; } +bool validate_za_context(struct za_context *za, char **err) +{ + /* Size will be rounded up to a multiple of 16 bytes */ + size_t regs_size + = ((ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za->vl)) + 15) / 16) * 16; + + if (!za || !err) + return false; + + /* Either a bare za_context or a za_context followed by regs data */ + if ((za->head.size != sizeof(struct za_context)) && + (za->head.size != regs_size)) { + *err = "bad size for ZA context"; + return false; + } + + if (!sve_vl_valid(za->vl)) { + *err = "SME VL in ZA context invalid"; + + return false; + } + + return true; +} + bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) { bool terminated = false; @@ -82,6 +107,7 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) int flags = 0; struct extra_context *extra = NULL; struct sve_context *sve = NULL; + struct za_context *za = NULL; struct _aarch64_ctx *head = (struct _aarch64_ctx *)uc->uc_mcontext.__reserved; @@ -120,6 +146,13 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) sve = (struct sve_context *)head; flags |= SVE_CTX; break; + case ZA_MAGIC: + if (flags & ZA_CTX) + *err = "Multiple ZA_MAGIC"; + /* Size is validated in validate_za_context() */ + za = (struct za_context *)head; + flags |= ZA_CTX; + break; case EXTRA_MAGIC: if (flags & EXTRA_CTX) *err = "Multiple EXTRA_MAGIC"; @@ -165,6 +198,9 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) if (flags & SVE_CTX) if (!validate_sve_context(sve, 
err)) return false; + if (flags & ZA_CTX) + if (!validate_za_context(za, err)) + return false; head = GET_RESV_NEXT_HEAD(head); } diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.h b/tools/testing/selftests/arm64/signal/testcases/testcases.h index ad884c135314..49f1d5de7b5b 100644 --- a/tools/testing/selftests/arm64/signal/testcases/testcases.h +++ b/tools/testing/selftests/arm64/signal/testcases/testcases.h @@ -16,7 +16,8 @@ #define FPSIMD_CTX (1 << 0) #define SVE_CTX (1 << 1) -#define EXTRA_CTX (1 << 2) +#define ZA_CTX (1 << 2) +#define EXTRA_CTX (1 << 3) #define KSFT_BAD_MAGIC 0xdeadbeef From 5aa45cc5355db3f5302e232a0fe29759ace4bc92 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:42 +0100 Subject: [PATCH 050/145] kselftest/arm64: Add stress test for SME ZA context switching Add a stress test for context switching of the ZA register state based on the similar tests Dave Martin wrote for FPSIMD and SVE registers. The test loops indefinitely writing a data pattern to ZA then reading it back and verifying that it's what was expected. Unlike the other tests we manually assemble the SME instructions since at present no released toolchain has SME support integrated. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-35-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/.gitignore | 1 + tools/testing/selftests/arm64/fp/Makefile | 3 + tools/testing/selftests/arm64/fp/za-stress | 59 +++ tools/testing/selftests/arm64/fp/za-test.S | 388 ++++++++++++++++++++ 4 files changed, 451 insertions(+) create mode 100644 tools/testing/selftests/arm64/fp/za-stress create mode 100644 tools/testing/selftests/arm64/fp/za-test.S diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore index 5729a5b1adfc..ead3197e720b 100644 --- a/tools/testing/selftests/arm64/fp/.gitignore +++ b/tools/testing/selftests/arm64/fp/.gitignore @@ -8,3 +8,4 @@ sve-test ssve-test vec-syscfg vlset +za-test diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index e6643c9b0474..38d2d0d5a0eb 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -6,6 +6,7 @@ TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \ rdvl-sme rdvl-sve \ sve-test sve-stress \ ssve-test ssve-stress \ + za-test za-stress \ vlset all: $(TEST_GEN_PROGS) $(TEST_PROGS_EXTENDED) @@ -24,5 +25,7 @@ ssve-test: sve-test.S asm-utils.o $(CC) -DSSVE -nostdlib $^ -o $@ vec-syscfg: vec-syscfg.o rdvl.o vlset: vlset.o +za-test: za-test.o asm-utils.o + $(CC) -nostdlib $^ -o $@ include ../../lib.mk diff --git a/tools/testing/selftests/arm64/fp/za-stress b/tools/testing/selftests/arm64/fp/za-stress new file mode 100644 index 000000000000..5ac386b55b95 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-stress @@ -0,0 +1,59 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2015-2019 ARM Limited. 
+# Original author: Dave Martin + +set -ue + +NR_CPUS=`nproc` + +pids= +logs= + +cleanup () { + trap - INT TERM CHLD + set +e + + if [ -n "$pids" ]; then + kill $pids + wait $pids + pids= + fi + + if [ -n "$logs" ]; then + cat $logs + rm $logs + logs= + fi +} + +interrupt () { + cleanup + exit 0 +} + +child_died () { + cleanup + exit 1 +} + +trap interrupt INT TERM EXIT + +for x in `seq 0 $((NR_CPUS * 4))`; do + log=`mktemp` + logs=$logs\ $log + ./za-test >$log & + pids=$pids\ $! +done + +# Wait for all child processes to be created: +sleep 10 + +while :; do + kill -USR1 $pids +done & +pids=$pids\ $! + +wait + +exit 1 diff --git a/tools/testing/selftests/arm64/fp/za-test.S b/tools/testing/selftests/arm64/fp/za-test.S new file mode 100644 index 000000000000..9ab6f9cd9623 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-test.S @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2021 ARM Limited. +// Original author: Mark Brown +// +// Scalable Matrix Extension ZA context switch test +// Repeatedly writes unique test patterns into each ZA tile +// and reads them back to verify integrity. +// +// for x in `seq 1 NR_CPUS`; do sve-test & pids=$pids\ $! ; done +// (leave it running for as long as you want...) +// kill $pids + +#include +#include "assembler.h" +#include "asm-offsets.h" +#include "sme-inst.h" + +.arch_extension sve + +#define MAXVL 2048 +#define MAXVL_B (MAXVL / 8) + +// Declare some storage space to shadow ZA register contents and a +// scratch buffer for a vector. +.pushsection .text +.data +.align 4 +zaref: + .space MAXVL_B * MAXVL_B +scratch: + .space MAXVL_B +.popsection + +// Trivial memory copy: copy x2 bytes, starting at address x1, to address x0. +// Clobbers x0-x3 +function memcpy + cmp x2, #0 + b.eq 1f +0: ldrb w3, [x1], #1 + strb w3, [x0], #1 + subs x2, x2, #1 + b.ne 0b +1: ret +endfunction + +// Generate a test pattern for storage in ZA +// x0: pid +// x1: row in ZA +// x2: generation + +// These values are used to constuct a 32-bit pattern that is repeated in the +// scratch buffer as many times as will fit: +// bits 31:28 generation number (increments once per test_loop) +// bits 27:16 pid +// bits 15: 8 row number +// bits 7: 0 32-bit lane index + +function pattern + mov w3, wzr + bfi w3, w0, #16, #12 // PID + bfi w3, w1, #8, #8 // Row + bfi w3, w2, #28, #4 // Generation + + ldr x0, =scratch + mov w1, #MAXVL_B / 4 + +0: str w3, [x0], #4 + add w3, w3, #1 // Lane + subs w1, w1, #1 + b.ne 0b + + ret +endfunction + +// Get the address of shadow data for ZA horizontal vector xn +.macro _adrza xd, xn, nrtmp + ldr \xd, =zaref + rdsvl \nrtmp, 1 + madd \xd, x\nrtmp, \xn, \xd +.endm + +// Set up test pattern in a ZA horizontal vector +// x0: pid +// x1: row number +// x2: generation +function setup_za + mov x4, x30 + mov x12, x1 // Use x12 for vector select + + bl pattern // Get pattern in scratch buffer + _adrza x0, x12, 2 // Shadow buffer pointer to x0 and x5 + mov x5, x0 + ldr x1, =scratch + bl memcpy // length set up in x2 by _adrza + + _ldr_za 12, 5 // load vector w12 from pointer x5 + + ret x4 +endfunction + +// Trivial memory compare: compare x2 bytes starting at address x0 with +// bytes starting at address x1. +// Returns only if all bytes match; otherwise, the program is aborted. +// Clobbers x0-x5. +function memcmp + cbz x2, 2f + + stp x0, x1, [sp, #-0x20]! 
+ str x2, [sp, #0x10] + + mov x5, #0 +0: ldrb w3, [x0, x5] + ldrb w4, [x1, x5] + add x5, x5, #1 + cmp w3, w4 + b.ne 1f + subs x2, x2, #1 + b.ne 0b + +1: ldr x2, [sp, #0x10] + ldp x0, x1, [sp], #0x20 + b.ne barf + +2: ret +endfunction + +// Verify that a ZA vector matches its shadow in memory, else abort +// x0: row number +// Clobbers x0-x7 and x12. +function check_za + mov x3, x30 + + mov x12, x0 + _adrza x5, x0, 6 // pointer to expected value in x5 + mov x4, x0 + ldr x7, =scratch // x7 is scratch + + mov x0, x7 // Poison scratch + mov x1, x6 + bl memfill_ae + + _str_za 12, 7 // save vector w12 to pointer x7 + + mov x0, x5 + mov x1, x7 + mov x2, x6 + mov x30, x3 + b memcmp +endfunction + +// Any SME register modified here can cause corruption in the main +// thread -- but *only* the locations modified here. +function irritator_handler + // Increment the irritation signal count (x23): + ldr x0, [x2, #ucontext_regs + 8 * 23] + add x0, x0, #1 + str x0, [x2, #ucontext_regs + 8 * 23] + + // Corrupt some random ZA data +#if 0 + adr x0, .text + (irritator_handler - .text) / 16 * 16 + movi v0.8b, #1 + movi v9.16b, #2 + movi v31.8b, #3 +#endif + + ret +endfunction + +function terminate_handler + mov w21, w0 + mov x20, x2 + + puts "Terminated by signal " + mov w0, w21 + bl putdec + puts ", no error, iterations=" + ldr x0, [x20, #ucontext_regs + 8 * 22] + bl putdec + puts ", signals=" + ldr x0, [x20, #ucontext_regs + 8 * 23] + bl putdecn + + mov x0, #0 + mov x8, #__NR_exit + svc #0 +endfunction + +// w0: signal number +// x1: sa_action +// w2: sa_flags +// Clobbers x0-x6,x8 +function setsignal + str x30, [sp, #-((sa_sz + 15) / 16 * 16 + 16)]! + + mov w4, w0 + mov x5, x1 + mov w6, w2 + + add x0, sp, #16 + mov x1, #sa_sz + bl memclr + + mov w0, w4 + add x1, sp, #16 + str w6, [x1, #sa_flags] + str x5, [x1, #sa_handler] + mov x2, #0 + mov x3, #sa_mask_sz + mov x8, #__NR_rt_sigaction + svc #0 + + cbz w0, 1f + + puts "sigaction failure\n" + b .Labort + +1: ldr x30, [sp], #((sa_sz + 15) / 16 * 16 + 16) + ret +endfunction + +// Main program entry point +.globl _start +function _start +_start: + puts "Streaming mode " + smstart_za + + // Sanity-check and report the vector length + + rdsvl 19, 8 + cmp x19, #128 + b.lo 1f + cmp x19, #2048 + b.hi 1f + tst x19, #(8 - 1) + b.eq 2f + +1: puts "bad vector length: " + mov x0, x19 + bl putdecn + b .Labort + +2: puts "vector length:\t" + mov x0, x19 + bl putdec + puts " bits\n" + + // Obtain our PID, to ensure test pattern uniqueness between processes + mov x8, #__NR_getpid + svc #0 + mov x20, x0 + + puts "PID:\t" + mov x0, x20 + bl putdecn + + mov x23, #0 // Irritation signal count + + mov w0, #SIGINT + adr x1, terminate_handler + mov w2, #SA_SIGINFO + bl setsignal + + mov w0, #SIGTERM + adr x1, terminate_handler + mov w2, #SA_SIGINFO + bl setsignal + + mov w0, #SIGUSR1 + adr x1, irritator_handler + mov w2, #SA_SIGINFO + orr w2, w2, #SA_NODEFER + bl setsignal + + mov x22, #0 // generation number, increments per iteration +.Ltest_loop: + rdsvl 0, 8 + cmp x0, x19 + b.ne vl_barf + + rdsvl 21, 1 // Set up ZA & shadow with test pattern +0: mov x0, x20 + sub x1, x21, #1 + mov x2, x22 + bl setup_za + subs x21, x21, #1 + b.ne 0b + + and x8, x22, #127 // Every 128 interations... 
+ cbz x8, 0f + mov x8, #__NR_getpid // (otherwise minimal syscall) + b 1f +0: + mov x8, #__NR_sched_yield // ...encourage preemption +1: + svc #0 + + mrs x0, S3_3_C4_C2_2 // SVCR should have ZA=1,SM=0 + and x1, x0, #3 + cmp x1, #2 + b.ne svcr_barf + + rdsvl 21, 1 // Verify that the data made it through + rdsvl 24, 1 // Verify that the data made it through +0: sub x0, x24, x21 + bl check_za + subs x21, x21, #1 + bne 0b + + add x22, x22, #1 // Everything still working + b .Ltest_loop + +.Labort: + mov x0, #0 + mov x1, #SIGABRT + mov x8, #__NR_kill + svc #0 +endfunction + +function barf +// fpsimd.c acitivty log dump hack +// ldr w0, =0xdeadc0de +// mov w8, #__NR_exit +// svc #0 +// end hack + smstop + mov x10, x0 // expected data + mov x11, x1 // actual data + mov x12, x2 // data size + + puts "Mismatch: PID=" + mov x0, x20 + bl putdec + puts ", iteration=" + mov x0, x22 + bl putdec + puts ", row=" + mov x0, x21 + bl putdecn + puts "\tExpected [" + mov x0, x10 + mov x1, x12 + bl dumphex + puts "]\n\tGot [" + mov x0, x11 + mov x1, x12 + bl dumphex + puts "]\n" + + mov x8, #__NR_getpid + svc #0 +// fpsimd.c acitivty log dump hack +// ldr w0, =0xdeadc0de +// mov w8, #__NR_exit +// svc #0 +// ^ end of hack + mov x1, #SIGABRT + mov x8, #__NR_kill + svc #0 +// mov x8, #__NR_exit +// mov x1, #1 +// svc #0 +endfunction + +function vl_barf + mov x10, x0 + + puts "Bad active VL: " + mov x0, x10 + bl putdecn + + mov x8, #__NR_exit + mov x1, #1 + svc #0 +endfunction + +function svcr_barf + mov x10, x0 + + puts "Bad SVCR: " + mov x0, x10 + bl putdecn + + mov x8, #__NR_exit + mov x1, #1 + svc #0 +endfunction From 4963aeb35a9edca90f062885b0d78c47a00c1752 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:43 +0100 Subject: [PATCH 051/145] kselftest/arm64: signal: Add SME signal handling tests Add test cases for the SME signal handing ABI patterned off the SVE tests. Due to the small size of the tests and the differences in ABI (especially around needing to account for both streaming SVE and ZA) there is some code duplication here. We currently cover: - Reporting of the vector length. - Lack of support for changing vector length. - Presence and size of register state for streaming SVE and ZA. As with the SVE tests we do not yet have any validation of register contents. 
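To illustrate what the shared testcases code is validating, a signal handler can locate the ZA record by walking the records in uc_mcontext.__reserved in much the same way. The sketch below is not part of the patch, assumes uapi headers that already define ZA_MAGIC and struct za_context (added earlier in this series), and ignores the EXTRA_MAGIC indirection for brevity:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

/*
 * Walk the __reserved area of the signal frame looking for a ZA
 * record; the list is terminated by a record with magic 0.
 */
static struct za_context *find_za(ucontext_t *uc)
{
	struct _aarch64_ctx *head =
		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

	while (head->magic) {
		if (head->magic == ZA_MAGIC)
			return (struct za_context *)head;
		head = (struct _aarch64_ctx *)((char *)head + head->size);
	}

	return NULL;
}

static void handler(int sig, siginfo_t *si, void *ctx)
{
	struct za_context *za = find_za(ctx);

	if (za)
		/* A bare header means ZA was disabled when the signal hit */
		printf("ZA record: size %u, vl %u\n", za->head.size, za->vl);
	else
		printf("no ZA record\n");
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);

	return 0;
}

When ZA is disabled only the bare header is present; when ZA is enabled the record also carries the ZA data and its size grows with the vector length.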
Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-36-broonie@kernel.org Signed-off-by: Catalin Marinas --- .../testing/selftests/arm64/signal/.gitignore | 3 + .../selftests/arm64/signal/test_signals.h | 4 + .../arm64/signal/test_signals_utils.c | 6 + .../testcases/fake_sigreturn_sme_change_vl.c | 92 ++++++++++++ .../arm64/signal/testcases/sme_trap_no_sm.c | 38 +++++ .../signal/testcases/sme_trap_non_streaming.c | 45 ++++++ .../arm64/signal/testcases/sme_trap_za.c | 36 +++++ .../selftests/arm64/signal/testcases/sme_vl.c | 68 +++++++++ .../arm64/signal/testcases/ssve_regs.c | 135 ++++++++++++++++++ .../arm64/signal/testcases/za_regs.c | 128 +++++++++++++++++ 10 files changed, 555 insertions(+) create mode 100644 tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c create mode 100644 tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c create mode 100644 tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c create mode 100644 tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c create mode 100644 tools/testing/selftests/arm64/signal/testcases/sme_vl.c create mode 100644 tools/testing/selftests/arm64/signal/testcases/ssve_regs.c create mode 100644 tools/testing/selftests/arm64/signal/testcases/za_regs.c diff --git a/tools/testing/selftests/arm64/signal/.gitignore b/tools/testing/selftests/arm64/signal/.gitignore index c1742755abb9..e8d2b57f73ec 100644 --- a/tools/testing/selftests/arm64/signal/.gitignore +++ b/tools/testing/selftests/arm64/signal/.gitignore @@ -1,5 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only mangle_* fake_sigreturn_* +sme_* +ssve_* sve_* +za_* !*.[ch] diff --git a/tools/testing/selftests/arm64/signal/test_signals.h b/tools/testing/selftests/arm64/signal/test_signals.h index f909b70d9e98..c70fdec7d7c4 100644 --- a/tools/testing/selftests/arm64/signal/test_signals.h +++ b/tools/testing/selftests/arm64/signal/test_signals.h @@ -34,11 +34,15 @@ enum { FSSBS_BIT, FSVE_BIT, + FSME_BIT, + FSME_FA64_BIT, FMAX_END }; #define FEAT_SSBS (1UL << FSSBS_BIT) #define FEAT_SVE (1UL << FSVE_BIT) +#define FEAT_SME (1UL << FSME_BIT) +#define FEAT_SME_FA64 (1UL << FSME_FA64_BIT) /* * A descriptor used to describe and configure a test case. 
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c index 5743897984b0..b588d10afd5b 100644 --- a/tools/testing/selftests/arm64/signal/test_signals_utils.c +++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c @@ -27,6 +27,8 @@ static int sig_copyctx = SIGTRAP; static char const *const feats_names[FMAX_END] = { " SSBS ", " SVE ", + " SME ", + " FA64 ", }; #define MAX_FEATS_SZ 128 @@ -268,6 +270,10 @@ int test_init(struct tdescr *td) td->feats_supported |= FEAT_SSBS; if (getauxval(AT_HWCAP) & HWCAP_SVE) td->feats_supported |= FEAT_SVE; + if (getauxval(AT_HWCAP2) & HWCAP2_SME) + td->feats_supported |= FEAT_SME; + if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64) + td->feats_supported |= FEAT_SME_FA64; if (feats_ok(td)) { if (td->feats_required & td->feats_supported) fprintf(stderr, diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c new file mode 100644 index 000000000000..7ed762b7202f --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Attempt to change the streaming SVE vector length in a signal + * handler, this is not supported and is expected to segfault. + */ + +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +struct fake_sigframe sf; +static unsigned int vls[SVE_VQ_MAX]; +unsigned int nvls = 0; + +static bool sme_get_vls(struct tdescr *td) +{ + int vq, vl; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SVE_SET_VL, vq * 16); + if (vl == -1) + return false; + + vl &= PR_SME_VL_LEN_MASK; + + /* Skip missing VLs */ + vq = sve_vq_from_vl(vl); + + vls[nvls++] = vl; + } + + /* We need at least two VLs */ + if (nvls < 2) { + fprintf(stderr, "Only %d VL supported\n", nvls); + return false; + } + + return true; +} + +static int fake_sigreturn_ssve_change_vl(struct tdescr *td, + siginfo_t *si, ucontext_t *uc) +{ + size_t resv_sz, offset; + struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); + struct sve_context *sve; + + /* Get a signal context with a SME ZA frame in it */ + if (!get_current_context(td, &sf.uc)) + return 1; + + resv_sz = GET_SF_RESV_SIZE(sf); + head = get_header(head, SVE_MAGIC, resv_sz, &offset); + if (!head) { + fprintf(stderr, "No SVE context\n"); + return 1; + } + + if (head->size != sizeof(struct sve_context)) { + fprintf(stderr, "Register data present, aborting\n"); + return 1; + } + + sve = (struct sve_context *)head; + + /* No changes are supported; init left us at minimum VL so go to max */ + fprintf(stderr, "Attempting to change VL from %d to %d\n", + sve->vl, vls[0]); + sve->vl = vls[0]; + + fake_sigreturn(&sf, sizeof(sf), 0); + + return 1; +} + +struct tdescr tde = { + .name = "FAKE_SIGRETURN_SSVE_CHANGE", + .descr = "Attempt to change Streaming SVE VL", + .feats_required = FEAT_SME, + .sig_ok = SIGSEGV, + .timeout = 3, + .init = sme_get_vls, + .run = fake_sigreturn_ssve_change_vl, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c b/tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c new file mode 100644 index 000000000000..f9d76ae32bba --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + 
* Copyright (C) 2021 ARM Limited + * + * Verify that using a streaming mode instruction without enabling it + * generates a SIGILL. + */ + +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +int sme_trap_no_sm_trigger(struct tdescr *td) +{ + /* SMSTART ZA ; ADDHA ZA0.S, P0/M, P0/M, Z0.S */ + asm volatile(".inst 0xd503457f ; .inst 0xc0900000"); + + return 0; +} + +int sme_trap_no_sm_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + return 1; +} + +struct tdescr tde = { + .name = "SME trap without SM", + .descr = "Check that we get a SIGILL if we use streaming mode without enabling it", + .timeout = 3, + .feats_required = FEAT_SME, /* We need a SMSTART ZA */ + .sanity_disabled = true, + .trigger = sme_trap_no_sm_trigger, + .run = sme_trap_no_sm_run, + .sig_ok = SIGILL, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c b/tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c new file mode 100644 index 000000000000..e469ae5348e3 --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that using an instruction not supported in streaming mode + * traps when in streaming mode. + */ + +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +int sme_trap_non_streaming_trigger(struct tdescr *td) +{ + /* + * The framework will handle SIGILL so we need to exit SM to + * stop any other code triggering a further SIGILL down the + * line from using a streaming-illegal instruction. + */ + asm volatile(".inst 0xd503437f; /* SMSTART ZA */ \ + cnt v0.16b, v0.16b; \ + .inst 0xd503447f /* SMSTOP ZA */"); + + return 0; +} + +int sme_trap_non_streaming_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + return 1; +} + +struct tdescr tde = { + .name = "SME SM trap unsupported instruction", + .descr = "Check that we get a SIGILL if we use an unsupported instruction in streaming mode", + .feats_required = FEAT_SME, + .feats_incompatible = FEAT_SME_FA64, + .timeout = 3, + .sanity_disabled = true, + .trigger = sme_trap_non_streaming_trigger, + .run = sme_trap_non_streaming_run, + .sig_ok = SIGILL, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c b/tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c new file mode 100644 index 000000000000..3a7747af4715 --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that accessing ZA without enabling it generates a SIGILL. 
+ */ + +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +int sme_trap_za_trigger(struct tdescr *td) +{ + /* ZERO ZA */ + asm volatile(".inst 0xc00800ff"); + + return 0; +} + +int sme_trap_za_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + return 1; +} + +struct tdescr tde = { + .name = "SME ZA trap", + .descr = "Check that we get a SIGILL if we access ZA without enabling", + .timeout = 3, + .sanity_disabled = true, + .trigger = sme_trap_za_trigger, + .run = sme_trap_za_run, + .sig_ok = SIGILL, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_vl.c b/tools/testing/selftests/arm64/signal/testcases/sme_vl.c new file mode 100644 index 000000000000..13ff3b35cbaf --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/sme_vl.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Check that the SME vector length reported in signal contexts is the + * expected one. + */ + +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +struct fake_sigframe sf; +unsigned int vl; + +static bool get_sme_vl(struct tdescr *td) +{ + int ret = prctl(PR_SME_GET_VL); + if (ret == -1) + return false; + + vl = ret; + + return true; +} + +static int sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + size_t resv_sz, offset; + struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); + struct za_context *za; + + /* Get a signal context which should have a ZA frame in it */ + if (!get_current_context(td, &sf.uc)) + return 1; + + resv_sz = GET_SF_RESV_SIZE(sf); + head = get_header(head, ZA_MAGIC, resv_sz, &offset); + if (!head) { + fprintf(stderr, "No ZA context\n"); + return 1; + } + za = (struct za_context *)head; + + if (za->vl != vl) { + fprintf(stderr, "ZA sigframe VL %u, expected %u\n", + za->vl, vl); + return 1; + } else { + fprintf(stderr, "got expected VL %u\n", vl); + } + + td->pass = 1; + + return 0; +} + +struct tdescr tde = { + .name = "SME VL", + .descr = "Check that we get the right SME VL reported", + .feats_required = FEAT_SME, + .timeout = 3, + .init = get_sme_vl, + .run = sme_vl, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c new file mode 100644 index 000000000000..9022a6cab4b3 --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that the streaming SVE register context in signal frames is + * set up as expected. 
+ */ + +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +struct fake_sigframe sf; +static unsigned int vls[SVE_VQ_MAX]; +unsigned int nvls = 0; + +static bool sme_get_vls(struct tdescr *td) +{ + int vq, vl; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SME_SET_VL, vq * 16); + if (vl == -1) + return false; + + vl &= PR_SME_VL_LEN_MASK; + + /* Skip missing VLs */ + vq = sve_vq_from_vl(vl); + + vls[nvls++] = vl; + } + + /* We need at least one VL */ + if (nvls < 1) { + fprintf(stderr, "Only %d VL supported\n", nvls); + return false; + } + + return true; +} + +static void setup_ssve_regs(void) +{ + /* smstart sm; real data is TODO */ + asm volatile(".inst 0xd503437f" : : : ); +} + +static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc, + unsigned int vl) +{ + size_t resv_sz, offset; + struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); + struct sve_context *ssve; + int ret; + + fprintf(stderr, "Testing VL %d\n", vl); + + ret = prctl(PR_SME_SET_VL, vl); + if (ret != vl) { + fprintf(stderr, "Failed to set VL, got %d\n", ret); + return 1; + } + + /* + * Get a signal context which should have a SVE frame and registers + * in it. + */ + setup_ssve_regs(); + if (!get_current_context(td, &sf.uc)) + return 1; + + resv_sz = GET_SF_RESV_SIZE(sf); + head = get_header(head, SVE_MAGIC, resv_sz, &offset); + if (!head) { + fprintf(stderr, "No SVE context\n"); + return 1; + } + + ssve = (struct sve_context *)head; + if (ssve->vl != vl) { + fprintf(stderr, "Got VL %d, expected %d\n", ssve->vl, vl); + return 1; + } + + /* The actual size validation is done in get_current_context() */ + fprintf(stderr, "Got expected size %u and VL %d\n", + head->size, ssve->vl); + + return 0; +} + +static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + int i; + + for (i = 0; i < nvls; i++) { + /* + * TODO: the signal test helpers can't currently cope + * with signal frames bigger than struct sigcontext, + * skip VLs that will trigger that. + */ + if (vls[i] > 64) { + printf("Skipping VL %u due to stack size\n", vls[i]); + continue; + } + + if (do_one_sme_vl(td, si, uc, vls[i])) + return 1; + } + + td->pass = 1; + + return 0; +} + +struct tdescr tde = { + .name = "Streaming SVE registers", + .descr = "Check that we get the right Streaming SVE registers reported", + /* + * We shouldn't require FA64 but things like memset() used in the + * helpers might use unsupported instructions so for now disable + * the test unless we've got the full instruction set. + */ + .feats_required = FEAT_SME | FEAT_SME_FA64, + .timeout = 3, + .init = sme_get_vls, + .run = sme_regs, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c new file mode 100644 index 000000000000..b94e4f99fcac --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that the ZA register context in signal frames is set up as + * expected. 
+ */ + +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +struct fake_sigframe sf; +static unsigned int vls[SVE_VQ_MAX]; +unsigned int nvls = 0; + +static bool sme_get_vls(struct tdescr *td) +{ + int vq, vl; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SVE_SET_VL, vq * 16); + if (vl == -1) + return false; + + vl &= PR_SME_VL_LEN_MASK; + + /* Skip missing VLs */ + vq = sve_vq_from_vl(vl); + + vls[nvls++] = vl; + } + + /* We need at least one VL */ + if (nvls < 1) { + fprintf(stderr, "Only %d VL supported\n", nvls); + return false; + } + + return true; +} + +static void setup_za_regs(void) +{ + /* smstart za; real data is TODO */ + asm volatile(".inst 0xd503457f" : : : ); +} + +static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc, + unsigned int vl) +{ + size_t resv_sz, offset; + struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); + struct za_context *za; + + fprintf(stderr, "Testing VL %d\n", vl); + + if (prctl(PR_SME_SET_VL, vl) != vl) { + fprintf(stderr, "Failed to set VL\n"); + return 1; + } + + /* + * Get a signal context which should have a SVE frame and registers + * in it. + */ + setup_za_regs(); + if (!get_current_context(td, &sf.uc)) + return 1; + + resv_sz = GET_SF_RESV_SIZE(sf); + head = get_header(head, ZA_MAGIC, resv_sz, &offset); + if (!head) { + fprintf(stderr, "No ZA context\n"); + return 1; + } + + za = (struct za_context *)head; + if (za->vl != vl) { + fprintf(stderr, "Got VL %d, expected %d\n", za->vl, vl); + return 1; + } + + /* The actual size validation is done in get_current_context() */ + fprintf(stderr, "Got expected size %u and VL %d\n", + head->size, za->vl); + + return 0; +} + +static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + int i; + + for (i = 0; i < nvls; i++) { + /* + * TODO: the signal test helpers can't currently cope + * with signal frames bigger than struct sigcontext, + * skip VLs that will trigger that. + */ + if (vls[i] > 32) { + printf("Skipping VL %u due to stack size\n", vls[i]); + continue; + } + + if (do_one_sme_vl(td, si, uc, vls[i])) + return 1; + } + + td->pass = 1; + + return 0; +} + +struct tdescr tde = { + .name = "ZA register", + .descr = "Check that we get the right ZA registers reported", + .feats_required = FEAT_SME, + .timeout = 3, + .init = sme_get_vls, + .run = sme_regs, +}; From fa23100bbad0748f6503511b109cfec955e4183d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:44 +0100 Subject: [PATCH 052/145] kselftest/arm64: Add streaming SVE to SVE ptrace tests In order to allow ptrace of streaming mode SVE registers we have added a new regset for streaming mode which in isolation offers the same ABI as regular SVE with a different vector type. Add this to the array of regsets we handle, together with additional tests for the interoperation of the two regsets. 
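For illustration (not part of this patch), the new regset can be read from a stopped tracee in the same way as NT_ARM_SVE, using PTRACE_GETREGSET with an iovec; the header layout is the shared struct user_sve_header from the arm64 uapi <asm/ptrace.h>. The sketch below defines NT_ARM_SSVE locally, as the test does, in case the toolchain's elf.h does not yet have it:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>
#include <asm/ptrace.h>

#ifndef NT_ARM_SSVE
#define NT_ARM_SSVE 0x40b
#endif

int main(void)
{
	struct user_sve_header header;
	struct iovec iov;
	pid_t child;

	child = fork();
	if (child == 0) {
		/* Child: make ourselves traceable and stop */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(child, NULL, 0);

	iov.iov_base = &header;
	iov.iov_len = sizeof(header);

	/* Streaming mode SVE reuses the SVE regset header layout */
	if (ptrace(PTRACE_GETREGSET, child, NT_ARM_SSVE, &iov) == 0)
		printf("streaming SVE: vl %u, flags 0x%x\n",
		       header.vl, header.flags);
	else
		printf("NT_ARM_SSVE unavailable: %s\n", strerror(errno));

	kill(child, SIGKILL);

	return 0;
}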
Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-37-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/sve-ptrace.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c index 36b6f0749f23..8c4847977583 100644 --- a/tools/testing/selftests/arm64/fp/sve-ptrace.c +++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c @@ -26,6 +26,10 @@ #define NT_ARM_SVE 0x405 #endif +#ifndef NT_ARM_SSVE +#define NT_ARM_SSVE 0x40b +#endif + struct vec_type { const char *name; unsigned long hwcap_type; @@ -42,6 +46,13 @@ static const struct vec_type vec_types[] = { .regset = NT_ARM_SVE, .prctl_set = PR_SVE_SET_VL, }, + { + .name = "Streaming SVE", + .hwcap_type = AT_HWCAP2, + .hwcap = HWCAP2_SME, + .regset = NT_ARM_SSVE, + .prctl_set = PR_SME_SET_VL, + }, }; #define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 4) From 86c8888f91a95a30d8a224c0c655ddac33d04eac Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:45 +0100 Subject: [PATCH 053/145] kselftest/arm64: Add coverage for the ZA ptrace interface Add some basic coverage for the ZA ptrace interface, including walking through all the vector lengths supported in the system. Unlike SVE doing syscalls does not discard the ZA state so when we set data in ZA we run the child process briefly, having it add one to each byte in ZA in order to validate that both the vector size and data are being read and written as expected when the process runs. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-38-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/.gitignore | 1 + tools/testing/selftests/arm64/fp/Makefile | 3 +- tools/testing/selftests/arm64/fp/za-ptrace.c | 356 +++++++++++++++++++ 3 files changed, 359 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/arm64/fp/za-ptrace.c diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore index ead3197e720b..d98d3d48b504 100644 --- a/tools/testing/selftests/arm64/fp/.gitignore +++ b/tools/testing/selftests/arm64/fp/.gitignore @@ -8,4 +8,5 @@ sve-test ssve-test vec-syscfg vlset +za-ptrace za-test diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index 38d2d0d5a0eb..807a8faf8d57 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS += -I../../../../../usr/include/ -TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg +TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg za-ptrace TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \ rdvl-sme rdvl-sve \ sve-test sve-stress \ @@ -27,5 +27,6 @@ vec-syscfg: vec-syscfg.o rdvl.o vlset: vlset.o za-test: za-test.o asm-utils.o $(CC) -nostdlib $^ -o $@ +za-ptrace: za-ptrace.o include ../../lib.mk diff --git a/tools/testing/selftests/arm64/fp/za-ptrace.c b/tools/testing/selftests/arm64/fp/za-ptrace.c new file mode 100644 index 000000000000..bf6158654056 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-ptrace.c @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 ARM Limited. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../../kselftest.h" + +/* and don't like each other, so: */ +#ifndef NT_ARM_ZA +#define NT_ARM_ZA 0x40c +#endif + +#define EXPECTED_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3) + +static void fill_buf(char *buf, size_t size) +{ + int i; + + for (i = 0; i < size; i++) + buf[i] = random(); +} + +static int do_child(void) +{ + if (ptrace(PTRACE_TRACEME, -1, NULL, NULL)) + ksft_exit_fail_msg("PTRACE_TRACEME", strerror(errno)); + + if (raise(SIGSTOP)) + ksft_exit_fail_msg("raise(SIGSTOP)", strerror(errno)); + + return EXIT_SUCCESS; +} + +static struct user_za_header *get_za(pid_t pid, void **buf, size_t *size) +{ + struct user_za_header *za; + void *p; + size_t sz = sizeof(*za); + struct iovec iov; + + while (1) { + if (*size < sz) { + p = realloc(*buf, sz); + if (!p) { + errno = ENOMEM; + goto error; + } + + *buf = p; + *size = sz; + } + + iov.iov_base = *buf; + iov.iov_len = sz; + if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov)) + goto error; + + za = *buf; + if (za->size <= sz) + break; + + sz = za->size; + } + + return za; + +error: + return NULL; +} + +static int set_za(pid_t pid, const struct user_za_header *za) +{ + struct iovec iov; + + iov.iov_base = (void *)za; + iov.iov_len = za->size; + return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZA, &iov); +} + +/* Validate attempting to set the specfied VL via ptrace */ +static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported) +{ + struct user_za_header za; + struct user_za_header *new_za = NULL; + size_t new_za_size = 0; + int ret, prctl_vl; + + *supported = false; + + /* Check if the VL is supported in this process */ + prctl_vl = prctl(PR_SME_SET_VL, vl); + if (prctl_vl == -1) + ksft_exit_fail_msg("prctl(PR_SME_SET_VL) failed: %s (%d)\n", + strerror(errno), errno); + + /* If the VL is not supported then a supported VL will be returned */ + *supported = (prctl_vl == vl); + + /* Set the VL by doing a set with no register payload */ + memset(&za, 0, sizeof(za)); + za.size = sizeof(za); + za.vl = vl; + ret = set_za(child, &za); + if (ret != 0) { + ksft_test_result_fail("Failed to set VL %u\n", vl); + return; + } + + /* + * Read back the new register state and verify that we have the + * same VL that we got from prctl() on ourselves. 
+ */ + if (!get_za(child, (void **)&new_za, &new_za_size)) { + ksft_test_result_fail("Failed to read VL %u\n", vl); + return; + } + + ksft_test_result(new_za->vl = prctl_vl, "Set VL %u\n", vl); + + free(new_za); +} + +/* Validate attempting to set no ZA data and read it back */ +static void ptrace_set_no_data(pid_t child, unsigned int vl) +{ + void *read_buf = NULL; + struct user_za_header write_za; + struct user_za_header *read_za; + size_t read_za_size = 0; + int ret; + + /* Set up some data and write it out */ + memset(&write_za, 0, sizeof(write_za)); + write_za.size = ZA_PT_ZA_OFFSET; + write_za.vl = vl; + + ret = set_za(child, &write_za); + if (ret != 0) { + ksft_test_result_fail("Failed to set VL %u no data\n", vl); + return; + } + + /* Read the data back */ + if (!get_za(child, (void **)&read_buf, &read_za_size)) { + ksft_test_result_fail("Failed to read VL %u no data\n", vl); + return; + } + read_za = read_buf; + + /* We might read more data if there's extensions we don't know */ + if (read_za->size < write_za.size) { + ksft_test_result_fail("VL %u wrote %d bytes, only read %d\n", + vl, write_za.size, read_za->size); + goto out_read; + } + + ksft_test_result(read_za->size == write_za.size, + "Disabled ZA for VL %u\n", vl); + +out_read: + free(read_buf); +} + +/* Validate attempting to set data and read it back */ +static void ptrace_set_get_data(pid_t child, unsigned int vl) +{ + void *write_buf; + void *read_buf = NULL; + struct user_za_header *write_za; + struct user_za_header *read_za; + size_t read_za_size = 0; + unsigned int vq = sve_vq_from_vl(vl); + int ret; + size_t data_size; + + data_size = ZA_PT_SIZE(vq); + write_buf = malloc(data_size); + if (!write_buf) { + ksft_test_result_fail("Error allocating %d byte buffer for VL %u\n", + data_size, vl); + return; + } + write_za = write_buf; + + /* Set up some data and write it out */ + memset(write_za, 0, data_size); + write_za->size = data_size; + write_za->vl = vl; + + fill_buf(write_buf + ZA_PT_ZA_OFFSET, ZA_PT_ZA_SIZE(vq)); + + ret = set_za(child, write_za); + if (ret != 0) { + ksft_test_result_fail("Failed to set VL %u data\n", vl); + goto out; + } + + /* Read the data back */ + if (!get_za(child, (void **)&read_buf, &read_za_size)) { + ksft_test_result_fail("Failed to read VL %u data\n", vl); + goto out; + } + read_za = read_buf; + + /* We might read more data if there's extensions we don't know */ + if (read_za->size < write_za->size) { + ksft_test_result_fail("VL %u wrote %d bytes, only read %d\n", + vl, write_za->size, read_za->size); + goto out_read; + } + + ksft_test_result(memcmp(write_buf + ZA_PT_ZA_OFFSET, + read_buf + ZA_PT_ZA_OFFSET, + ZA_PT_ZA_SIZE(vq)) == 0, + "Data match for VL %u\n", vl); + +out_read: + free(read_buf); +out: + free(write_buf); +} + +static int do_parent(pid_t child) +{ + int ret = EXIT_FAILURE; + pid_t pid; + int status; + siginfo_t si; + unsigned int vq, vl; + bool vl_supported; + + /* Attach to the child */ + while (1) { + int sig; + + pid = wait(&status); + if (pid == -1) { + perror("wait"); + goto error; + } + + /* + * This should never happen but it's hard to flag in + * the framework. 
+ */ + if (pid != child) + continue; + + if (WIFEXITED(status) || WIFSIGNALED(status)) + ksft_exit_fail_msg("Child died unexpectedly\n"); + + if (!WIFSTOPPED(status)) + goto error; + + sig = WSTOPSIG(status); + + if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) { + if (errno == ESRCH) + goto disappeared; + + if (errno == EINVAL) { + sig = 0; /* bust group-stop */ + goto cont; + } + + ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n", + strerror(errno)); + goto error; + } + + if (sig == SIGSTOP && si.si_code == SI_TKILL && + si.si_pid == pid) + break; + + cont: + if (ptrace(PTRACE_CONT, pid, NULL, sig)) { + if (errno == ESRCH) + goto disappeared; + + ksft_test_result_fail("PTRACE_CONT: %s\n", + strerror(errno)); + goto error; + } + } + + ksft_print_msg("Parent is %d, child is %d\n", getpid(), child); + + /* Step through every possible VQ */ + for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) { + vl = sve_vl_from_vq(vq); + + /* First, try to set this vector length */ + ptrace_set_get_vl(child, vl, &vl_supported); + + /* If the VL is supported validate data set/get */ + if (vl_supported) { + ptrace_set_no_data(child, vl); + ptrace_set_get_data(child, vl); + } else { + ksft_test_result_skip("Disabled ZA for VL %u\n", vl); + ksft_test_result_skip("Get and set data for VL %u\n", + vl); + } + } + + ret = EXIT_SUCCESS; + +error: + kill(child, SIGKILL); + +disappeared: + return ret; +} + +int main(void) +{ + int ret = EXIT_SUCCESS; + pid_t child; + + srandom(getpid()); + + ksft_print_header(); + + if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) { + ksft_set_plan(1); + ksft_exit_skip("SME not available\n"); + } + + ksft_set_plan(EXPECTED_TESTS); + + child = fork(); + if (!child) + return do_child(); + + if (do_parent(child)) + ret = EXIT_FAILURE; + + ksft_print_cnts(); + + return ret; +} From 43e3f85523e488f8acd6b371d457818d81977934 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:46 +0100 Subject: [PATCH 054/145] kselftest/arm64: Add SME support to syscall ABI test For every possible combination of SVE and SME vector length verify that for each possible value of SVCR after a syscall we leave streaming mode and ZA is preserved. We don't need to take account of any streaming/non streaming SVE vector length changes in the assembler code since the store instructions will handle the vector length for us. We log if the system supports FA64 and only try to set FFR in streaming mode if it does. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220419112247.711548-39-broonie@kernel.org Signed-off-by: Catalin Marinas --- .../selftests/arm64/abi/syscall-abi-asm.S | 79 ++++++- .../testing/selftests/arm64/abi/syscall-abi.c | 204 ++++++++++++++++-- .../testing/selftests/arm64/abi/syscall-abi.h | 15 ++ 3 files changed, 275 insertions(+), 23 deletions(-) create mode 100644 tools/testing/selftests/arm64/abi/syscall-abi.h diff --git a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S index 983467cfcee0..b523c21c2278 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S +++ b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S @@ -9,15 +9,42 @@ // invoked is configured in x8 of the input GPR data. 
// // x0: SVE VL, 0 for FP only +// x1: SME VL // // GPRs: gpr_in, gpr_out // FPRs: fpr_in, fpr_out // Zn: z_in, z_out // Pn: p_in, p_out // FFR: ffr_in, ffr_out +// ZA: za_in, za_out +// SVCR: svcr_in, svcr_out + +#include "syscall-abi.h" .arch_extension sve +/* + * LDR (vector to ZA array): + * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _ldr_za nw, nxbase, offset=0 + .inst 0xe1000000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + +/* + * STR (vector from ZA array): + * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] + */ +.macro _str_za nw, nxbase, offset=0 + .inst 0xe1200000 \ + | (((\nw) & 3) << 13) \ + | ((\nxbase) << 5) \ + | ((\offset) & 7) +.endm + .globl do_syscall do_syscall: // Store callee saved registers x19-x29 (80 bytes) plus x0 and x1 @@ -30,6 +57,24 @@ do_syscall: stp x25, x26, [sp, #80] stp x27, x28, [sp, #96] + // Set SVCR if we're doing SME + cbz x1, 1f + adrp x2, svcr_in + ldr x2, [x2, :lo12:svcr_in] + msr S3_3_C4_C2_2, x2 +1: + + // Load ZA if it's enabled - uses x12 as scratch due to SME LDR + tbz x2, #SVCR_ZA_SHIFT, 1f + mov w12, #0 + ldr x2, =za_in +2: _ldr_za 12, 2 + add x2, x2, x1 + add x12, x12, #1 + cmp x1, x12 + bne 2b +1: + // Load GPRs x8-x28, and save our SP/FP for later comparison ldr x2, =gpr_in add x2, x2, #64 @@ -68,7 +113,7 @@ do_syscall: ldp q30, q31, [x2, #16 * 30] 1: - // Load the SVE registers if we're doing SVE + // Load the SVE registers if we're doing SVE/SME cbz x0, 1f ldr x2, =z_in @@ -105,9 +150,14 @@ do_syscall: ldr z30, [x2, #30, MUL VL] ldr z31, [x2, #31, MUL VL] + // Only set a non-zero FFR, test patterns must be zero since the + // syscall should clear it - this lets us handle FA64. ldr x2, =ffr_in ldr p0, [x2, #0] + ldr x2, [x2, #0] + cbz x2, 2f wrffr p0.b +2: ldr x2, =p_in ldr p0, [x2, #0, MUL VL] @@ -169,6 +219,24 @@ do_syscall: stp q28, q29, [x2, #16 * 28] stp q30, q31, [x2, #16 * 30] + // Save SVCR if we're doing SME + cbz x1, 1f + mrs x2, S3_3_C4_C2_2 + adrp x3, svcr_out + str x2, [x3, :lo12:svcr_out] +1: + + // Save ZA if it's enabled - uses x12 as scratch due to SME STR + tbz x2, #SVCR_ZA_SHIFT, 1f + mov w12, #0 + ldr x2, =za_out +2: _str_za 12, 2 + add x2, x2, x1 + add x12, x12, #1 + cmp x1, x12 + bne 2b +1: + // Save the SVE state if we have some cbz x0, 1f @@ -224,6 +292,10 @@ do_syscall: str p14, [x2, #14, MUL VL] str p15, [x2, #15, MUL VL] + // Only save FFR if we wrote a value for SME + ldr x2, =ffr_in + ldr x2, [x2, #0] + cbz x2, 1f ldr x2, =ffr_out rdffr p0.b str p0, [x2, #0] @@ -237,4 +309,9 @@ do_syscall: ldp x27, x28, [sp, #96] ldp x29, x30, [sp], #112 + // Clear SVCR if we were doing SME so future tests don't have ZA + cbz x1, 1f + msr S3_3_C4_C2_2, xzr +1: + ret diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c index 1e13b7523918..b632bfe9e022 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi.c +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -18,9 +18,13 @@ #include "../../kselftest.h" +#include "syscall-abi.h" + #define NUM_VL ((SVE_VQ_MAX - SVE_VQ_MIN) + 1) -extern void do_syscall(int sve_vl); +static int default_sme_vl; + +extern void do_syscall(int sve_vl, int sme_vl); static void fill_random(void *buf, size_t size) { @@ -48,14 +52,15 @@ static struct syscall_cfg { uint64_t gpr_in[NUM_GPR]; uint64_t gpr_out[NUM_GPR]; -static void setup_gpr(struct syscall_cfg *cfg, int sve_vl) +static void setup_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { 
fill_random(gpr_in, sizeof(gpr_in)); gpr_in[8] = cfg->syscall_nr; memset(gpr_out, 0, sizeof(gpr_out)); } -static int check_gpr(struct syscall_cfg *cfg, int sve_vl) +static int check_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t svcr) { int errors = 0; int i; @@ -79,13 +84,15 @@ static int check_gpr(struct syscall_cfg *cfg, int sve_vl) uint64_t fpr_in[NUM_FPR * 2]; uint64_t fpr_out[NUM_FPR * 2]; -static void setup_fpr(struct syscall_cfg *cfg, int sve_vl) +static void setup_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { fill_random(fpr_in, sizeof(fpr_in)); memset(fpr_out, 0, sizeof(fpr_out)); } -static int check_fpr(struct syscall_cfg *cfg, int sve_vl) +static int check_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { int errors = 0; int i; @@ -109,13 +116,15 @@ static uint8_t z_zero[__SVE_ZREG_SIZE(SVE_VQ_MAX)]; uint8_t z_in[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; uint8_t z_out[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; -static void setup_z(struct syscall_cfg *cfg, int sve_vl) +static void setup_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { fill_random(z_in, sizeof(z_in)); fill_random(z_out, sizeof(z_out)); } -static int check_z(struct syscall_cfg *cfg, int sve_vl) +static int check_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { size_t reg_size = sve_vl; int errors = 0; @@ -126,13 +135,17 @@ static int check_z(struct syscall_cfg *cfg, int sve_vl) /* * After a syscall the low 128 bits of the Z registers should - * be preserved and the rest be zeroed or preserved. + * be preserved and the rest be zeroed or preserved, except if + * we were in streaming mode in which case the low 128 bits may + * also be cleared by the transition out of streaming mode. */ for (i = 0; i < SVE_NUM_ZREGS; i++) { void *in = &z_in[reg_size * i]; void *out = &z_out[reg_size * i]; - if (memcmp(in, out, SVE_VQ_BYTES) != 0) { + if ((memcmp(in, out, SVE_VQ_BYTES) != 0) && + !((svcr & SVCR_SM_MASK) && + memcmp(z_zero, out, SVE_VQ_BYTES) == 0)) { ksft_print_msg("%s SVE VL %d Z%d low 128 bits changed\n", cfg->name, sve_vl, i); errors++; @@ -145,13 +158,15 @@ static int check_z(struct syscall_cfg *cfg, int sve_vl) uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)]; uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)]; -static void setup_p(struct syscall_cfg *cfg, int sve_vl) +static void setup_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { fill_random(p_in, sizeof(p_in)); fill_random(p_out, sizeof(p_out)); } -static int check_p(struct syscall_cfg *cfg, int sve_vl) +static int check_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */ @@ -175,8 +190,19 @@ static int check_p(struct syscall_cfg *cfg, int sve_vl) uint8_t ffr_in[__SVE_PREG_SIZE(SVE_VQ_MAX)]; uint8_t ffr_out[__SVE_PREG_SIZE(SVE_VQ_MAX)]; -static void setup_ffr(struct syscall_cfg *cfg, int sve_vl) +static void setup_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { + /* + * If we are in streaming mode and do not have FA64 then FFR + * is unavailable. + */ + if ((svcr & SVCR_SM_MASK) && + !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)) { + memset(&ffr_in, 0, sizeof(ffr_in)); + return; + } + /* * It is only valid to set a contiguous set of bits starting * at 0. 
For now since we're expecting this to be cleared by @@ -186,7 +212,8 @@ static void setup_ffr(struct syscall_cfg *cfg, int sve_vl) fill_random(ffr_out, sizeof(ffr_out)); } -static int check_ffr(struct syscall_cfg *cfg, int sve_vl) +static int check_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */ int errors = 0; @@ -195,6 +222,10 @@ static int check_ffr(struct syscall_cfg *cfg, int sve_vl) if (!sve_vl) return 0; + if ((svcr & SVCR_SM_MASK) && + !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)) + return 0; + /* After a syscall the P registers should be preserved or zeroed */ for (i = 0; i < reg_size; i++) if (ffr_out[i] && (ffr_in[i] != ffr_out[i])) @@ -206,8 +237,65 @@ static int check_ffr(struct syscall_cfg *cfg, int sve_vl) return errors; } -typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl); -typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl); +uint64_t svcr_in, svcr_out; + +static void setup_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + svcr_in = svcr; +} + +static int check_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + int errors = 0; + + if (svcr_out & SVCR_SM_MASK) { + ksft_print_msg("%s Still in SM, SVCR %llx\n", + cfg->name, svcr_out); + errors++; + } + + if ((svcr_in & SVCR_ZA_MASK) != (svcr_out & SVCR_ZA_MASK)) { + ksft_print_msg("%s PSTATE.ZA changed, SVCR %llx != %llx\n", + cfg->name, svcr_in, svcr_out); + errors++; + } + + return errors; +} + +uint8_t za_in[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; +uint8_t za_out[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; + +static void setup_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + fill_random(za_in, sizeof(za_in)); + memset(za_out, 0, sizeof(za_out)); +} + +static int check_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + size_t reg_size = sme_vl * sme_vl; + int errors = 0; + + if (!(svcr & SVCR_ZA_MASK)) + return 0; + + if (memcmp(za_in, za_out, reg_size) != 0) { + ksft_print_msg("SME VL %d ZA does not match\n", sme_vl); + errors++; + } + + return errors; +} + +typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr); +typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr); /* * Each set of registers has a setup function which is called before @@ -225,20 +313,23 @@ static struct { { setup_z, check_z }, { setup_p, check_p }, { setup_ffr, check_ffr }, + { setup_svcr, check_svcr }, + { setup_za, check_za }, }; -static bool do_test(struct syscall_cfg *cfg, int sve_vl) +static bool do_test(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) { int errors = 0; int i; for (i = 0; i < ARRAY_SIZE(regset); i++) - regset[i].setup(cfg, sve_vl); + regset[i].setup(cfg, sve_vl, sme_vl, svcr); - do_syscall(sve_vl); + do_syscall(sve_vl, sme_vl); for (i = 0; i < ARRAY_SIZE(regset); i++) - errors += regset[i].check(cfg, sve_vl); + errors += regset[i].check(cfg, sve_vl, sme_vl, svcr); return errors == 0; } @@ -246,9 +337,10 @@ static bool do_test(struct syscall_cfg *cfg, int sve_vl) static void test_one_syscall(struct syscall_cfg *cfg) { int sve_vq, sve_vl; + int sme_vq, sme_vl; /* FPSIMD only case */ - ksft_test_result(do_test(cfg, 0), + ksft_test_result(do_test(cfg, 0, default_sme_vl, 0), "%s FPSIMD\n", cfg->name); if (!(getauxval(AT_HWCAP) & HWCAP_SVE)) @@ -265,8 +357,36 @@ static void test_one_syscall(struct syscall_cfg *cfg) if (sve_vq != 
sve_vq_from_vl(sve_vl)) sve_vq = sve_vq_from_vl(sve_vl); - ksft_test_result(do_test(cfg, sve_vl), + ksft_test_result(do_test(cfg, sve_vl, default_sme_vl, 0), "%s SVE VL %d\n", cfg->name, sve_vl); + + if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) + continue; + + for (sme_vq = SVE_VQ_MAX; sme_vq > 0; --sme_vq) { + sme_vl = prctl(PR_SME_SET_VL, sme_vq * 16); + if (sme_vl == -1) + ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n", + strerror(errno), errno); + + sme_vl &= PR_SME_VL_LEN_MASK; + + if (sme_vq != sve_vq_from_vl(sme_vl)) + sme_vq = sve_vq_from_vl(sme_vl); + + ksft_test_result(do_test(cfg, sve_vl, sme_vl, + SVCR_ZA_MASK | SVCR_SM_MASK), + "%s SVE VL %d/SME VL %d SM+ZA\n", + cfg->name, sve_vl, sme_vl); + ksft_test_result(do_test(cfg, sve_vl, sme_vl, + SVCR_SM_MASK), + "%s SVE VL %d/SME VL %d SM\n", + cfg->name, sve_vl, sme_vl); + ksft_test_result(do_test(cfg, sve_vl, sme_vl, + SVCR_ZA_MASK), + "%s SVE VL %d/SME VL %d ZA\n", + cfg->name, sve_vl, sme_vl); + } } } @@ -299,14 +419,54 @@ int sve_count_vls(void) return vl_count; } +int sme_count_vls(void) +{ + unsigned int vq; + int vl_count = 0; + int vl; + + if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) + return 0; + + /* Ensure we configure a SME VL, used to flag if SVCR is set */ + default_sme_vl = 16; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SME_SET_VL, vq * 16); + if (vl == -1) + ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n", + strerror(errno), errno); + + vl &= PR_SME_VL_LEN_MASK; + + if (vq != sve_vq_from_vl(vl)) + vq = sve_vq_from_vl(vl); + + vl_count++; + } + + return vl_count; +} + int main(void) { int i; + int tests = 1; /* FPSIMD */ srandom(getpid()); ksft_print_header(); - ksft_set_plan(ARRAY_SIZE(syscalls) * (sve_count_vls() + 1)); + tests += sve_count_vls(); + tests += (sve_count_vls() * sme_count_vls()) * 3; + ksft_set_plan(ARRAY_SIZE(syscalls) * tests); + + if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64) + ksft_print_msg("SME with FA64\n"); + else if (getauxval(AT_HWCAP2) & HWCAP2_SME) + ksft_print_msg("SME without FA64\n"); for (i = 0; i < ARRAY_SIZE(syscalls); i++) test_one_syscall(&syscalls[i]); diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.h b/tools/testing/selftests/arm64/abi/syscall-abi.h new file mode 100644 index 000000000000..bda5a87ad381 --- /dev/null +++ b/tools/testing/selftests/arm64/abi/syscall-abi.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 ARM Limited. + */ + +#ifndef SYSCALL_ABI_H +#define SYSCALL_ABI_H + +#define SVCR_ZA_MASK 2 +#define SVCR_SM_MASK 1 + +#define SVCR_ZA_SHIFT 1 +#define SVCR_SM_SHIFT 0 + +#endif From 212b0426bc361eede2f9ce43fb2a5b01070000a1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 19 Apr 2022 12:22:47 +0100 Subject: [PATCH 055/145] selftests/arm64: Add a testcase for handling of ZA on clone() Add a small testcase that attempts to do a clone() with ZA enabled and verifies that it remains enabled with the same contents. We only check one word in one horizontal vector of ZA since there's already other tests that check for data corruption more broadly, we're just looking to make sure that ZA is still enabled and it looks like the data got copied. 
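In outline the check boils down to the following, a simplified sketch using the regular C library; the test added below is built with nolibc and performs the actual SME accesses in assembly, with fork_test()/verify_fork() being the helpers introduced by this patch:

  #include <stdlib.h>
  #include <sys/types.h>
  #include <sys/wait.h>
  #include <unistd.h>

  int verify_fork(void);	/* nonzero if ZA is enabled and still holds MAGIC */

  int fork_test_sketch(void)
  {
  	pid_t child;
  	int status;

  	/* ZA has already been enabled and seeded with MAGIC at this point */
  	child = fork();
  	if (child == 0)
  		exit(verify_fork());		/* child must inherit ZA */

  	if (!verify_fork())			/* parent must keep ZA */
  		return 0;

  	if (waitpid(child, &status, 0) != child || !WIFEXITED(status))
  		return 0;

  	return WEXITSTATUS(status);		/* 1 means both sides passed */
  }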
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220419112247.711548-40-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/.gitignore | 1 + tools/testing/selftests/arm64/fp/Makefile | 9 +- .../testing/selftests/arm64/fp/za-fork-asm.S | 61 +++++++ tools/testing/selftests/arm64/fp/za-fork.c | 156 ++++++++++++++++++ 4 files changed, 226 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/arm64/fp/za-fork-asm.S create mode 100644 tools/testing/selftests/arm64/fp/za-fork.c diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore index d98d3d48b504..ea947af63882 100644 --- a/tools/testing/selftests/arm64/fp/.gitignore +++ b/tools/testing/selftests/arm64/fp/.gitignore @@ -8,5 +8,6 @@ sve-test ssve-test vec-syscfg vlset +za-fork za-ptrace za-test diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index 807a8faf8d57..95e707e32247 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS += -I../../../../../usr/include/ -TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg za-ptrace +TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg za-fork za-ptrace TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \ rdvl-sme rdvl-sve \ sve-test sve-stress \ @@ -11,6 +11,7 @@ TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \ all: $(TEST_GEN_PROGS) $(TEST_PROGS_EXTENDED) +# Build with nolibc to avoid effects due to libc's clone() support fp-pidbench: fp-pidbench.S asm-utils.o $(CC) -nostdlib $^ -o $@ fpsimd-test: fpsimd-test.o asm-utils.o @@ -25,6 +26,12 @@ ssve-test: sve-test.S asm-utils.o $(CC) -DSSVE -nostdlib $^ -o $@ vec-syscfg: vec-syscfg.o rdvl.o vlset: vlset.o +za-fork: za-fork.o za-fork-asm.o + $(CC) -nostdlib -static $^ -o $@ -lgcc +za-fork.o: za-fork.c + $(CC) -c -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ + -include ../../../../include/nolibc/nolibc.h \ + -ffreestanding -Wall $^ -o $@ za-test: za-test.o asm-utils.o $(CC) -nostdlib $^ -o $@ za-ptrace: za-ptrace.o diff --git a/tools/testing/selftests/arm64/fp/za-fork-asm.S b/tools/testing/selftests/arm64/fp/za-fork-asm.S new file mode 100644 index 000000000000..2fafadd491c3 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-fork-asm.S @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2021 ARM Limited. + +#include "sme-inst.h" + +.arch_extension sve + +#define MAGIC 42 + +#define MAXVL 2048 +#define MAXVL_B (MAXVL / 8) + +.pushsection .text +.data +.align 4 +scratch: + .space MAXVL_B +.popsection + +.globl fork_test +fork_test: + smstart_za + + // For simplicity just set one word in one vector, other tests + // cover general data corruption issues. 
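+	// The sequence below writes MAGIC into the scratch buffer and then
+	// uses the SME LDR alias (_ldr_za) with W12 == 0 to load that buffer
+	// into horizontal slice 0 of ZA before handing over to the C code.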
+ ldr x0, =scratch + mov x1, #MAGIC + str x1, [x0] + mov w12, wzr + _ldr_za 12, 0 // ZA.H[W12] loaded from [X0] + + // Tail call into the C portion that does the fork & verify + b fork_test_c + +.globl verify_fork +verify_fork: + // SVCR should have ZA=1, SM=0 + mrs x0, S3_3_C4_C2_2 + and x1, x0, #3 + cmp x1, #2 + beq 1f + mov x0, xzr + b 100f +1: + + // ZA should still have the value we loaded + ldr x0, =scratch + mov w12, wzr + _str_za 12, 0 // ZA.H[W12] stored to [X0] + ldr x1, [x0] + cmp x1, #MAGIC + beq 2f + mov x0, xzr + b 100f + +2: + // All tests passed + mov x0, #1 +100: + ret + diff --git a/tools/testing/selftests/arm64/fp/za-fork.c b/tools/testing/selftests/arm64/fp/za-fork.c new file mode 100644 index 000000000000..ff475c649e96 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/za-fork.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 ARM Limited. + * Original author: Mark Brown + */ + +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#define EXPECTED_TESTS 1 + +static void putstr(const char *str) +{ + write(1, str, strlen(str)); +} + +static void putnum(unsigned int num) +{ + char c; + + if (num / 10) + putnum(num / 10); + + c = '0' + (num % 10); + write(1, &c, 1); +} + +static int tests_run; +static int tests_passed; +static int tests_failed; +static int tests_skipped; + +static void print_summary(void) +{ + if (tests_passed + tests_failed + tests_skipped != EXPECTED_TESTS) + putstr("# UNEXPECTED TEST COUNT: "); + + putstr("# Totals: pass:"); + putnum(tests_passed); + putstr(" fail:"); + putnum(tests_failed); + putstr(" xfail:0 xpass:0 skip:"); + putnum(tests_skipped); + putstr(" error:0\n"); +} + +int fork_test(void); +int verify_fork(void); + +/* + * If we fork the value in the parent should be unchanged and the + * child should start with the same value. This is called from the + * fork_test() asm function. + */ +int fork_test_c(void) +{ + pid_t newpid, waiting; + int child_status, parent_result; + + newpid = fork(); + if (newpid == 0) { + /* In child */ + if (!verify_fork()) { + putstr("# ZA state invalid in child\n"); + exit(0); + } else { + exit(1); + } + } + if (newpid < 0) { + putstr("# fork() failed: -"); + putnum(-newpid); + putstr("\n"); + return 0; + } + + parent_result = verify_fork(); + if (!parent_result) + putstr("# ZA state invalid in parent\n"); + + for (;;) { + waiting = waitpid(newpid, &child_status, 0); + + if (waiting < 0) { + if (errno == EINTR) + continue; + putstr("# waitpid() failed: "); + putnum(errno); + putstr("\n"); + return 0; + } + if (waiting != newpid) { + putstr("# waitpid() returned wrong PID\n"); + return 0; + } + + if (!WIFEXITED(child_status)) { + putstr("# child did not exit\n"); + return 0; + } + + return WEXITSTATUS(child_status) && parent_result; + } +} + +#define run_test(name) \ + if (name()) { \ + tests_passed++; \ + } else { \ + tests_failed++; \ + putstr("not "); \ + } \ + putstr("ok "); \ + putnum(++tests_run); \ + putstr(" " #name "\n"); + +int main(int argc, char **argv) +{ + int ret, i; + + putstr("TAP version 13\n"); + putstr("1.."); + putnum(EXPECTED_TESTS); + putstr("\n"); + + putstr("# PID: "); + putnum(getpid()); + putstr("\n"); + + /* + * This test is run with nolibc which doesn't support hwcap and + * it's probably disproportionate to implement so instead check + * for the default vector length configuration in /proc. 
+ */ + ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0); + if (ret >= 0) { + run_test(fork_test); + + } else { + putstr("# SME support not present\n"); + + for (i = 0; i < EXPECTED_TESTS; i++) { + putstr("ok "); + putnum(i); + putstr(" skipped\n"); + } + + tests_skipped += EXPECTED_TESTS; + } + + print_summary(); + + return 0; +} From f82efe5b9a3ae75a557097a074b0125032e76a83 Mon Sep 17 00:00:00 2001 From: Guo Zhengkui Date: Tue, 19 Apr 2022 11:24:51 +0800 Subject: [PATCH 056/145] kselftest/arm64: fix array_size.cocci warning Fix the following coccicheck warnings: tools/testing/selftests/arm64/mte/check_child_memory.c:110:25-26: WARNING: Use ARRAY_SIZE tools/testing/selftests/arm64/mte/check_child_memory.c:88:24-25: WARNING: Use ARRAY_SIZE tools/testing/selftests/arm64/mte/check_child_memory.c:90:20-21: WARNING: Use ARRAY_SIZE tools/testing/selftests/arm64/mte/check_child_memory.c:147:24-25: WARNING: Use ARRAY_SIZE `ARRAY_SIZE` macro is defined in tools/testing/selftests/kselftest.h. Signed-off-by: Guo Zhengkui Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220419032501.22790-1-guozhengkui@vivo.com Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/mte/check_child_memory.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/arm64/mte/check_child_memory.c b/tools/testing/selftests/arm64/mte/check_child_memory.c index 43bd94f853ba..7597fc632cad 100644 --- a/tools/testing/selftests/arm64/mte/check_child_memory.c +++ b/tools/testing/selftests/arm64/mte/check_child_memory.c @@ -85,9 +85,9 @@ static int check_child_memory_mapping(int mem_type, int mode, int mapping) { char *ptr; int run, result; - int item = sizeof(sizes)/sizeof(int); + int item = ARRAY_SIZE(sizes); - item = sizeof(sizes)/sizeof(int); + item = ARRAY_SIZE(sizes); mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG); for (run = 0; run < item; run++) { ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping, @@ -107,7 +107,7 @@ static int check_child_file_mapping(int mem_type, int mode, int mapping) { char *ptr, *map_ptr; int run, fd, map_size, result = KSFT_PASS; - int total = sizeof(sizes)/sizeof(int); + int total = ARRAY_SIZE(sizes); mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG); for (run = 0; run < total; run++) { @@ -144,7 +144,7 @@ static int check_child_file_mapping(int mem_type, int mode, int mapping) int main(int argc, char *argv[]) { int err; - int item = sizeof(sizes)/sizeof(int); + int item = ARRAY_SIZE(sizes); page_size = getpagesize(); if (!page_size) { From a59f7a7f76407da78c21c42afe6d57bd885caa53 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 27 Apr 2022 19:19:51 +0100 Subject: [PATCH 057/145] selftests/arm64: Use TEST_GEN_PROGS_EXTENDED in the FP Makefile The kselftest lib.mk provides a default all target which builds additional programs from TEST_GEN_PROGS_EXTENDED, use that rather than using TEST_PROGS_EXTENDED which is for programs that don't need to be built like shell scripts. Leave fpsimd-stress and sve-stress there since they are scripts. 
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220427181954.357975-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/Makefile | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index 95e707e32247..a0b8cc59947e 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -2,14 +2,13 @@ CFLAGS += -I../../../../../usr/include/ TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg za-fork za-ptrace -TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \ +TEST_GEN_PROGS_EXTENDED := fp-pidbench fpsimd-test \ rdvl-sme rdvl-sve \ - sve-test sve-stress \ - ssve-test ssve-stress \ - za-test za-stress \ + sve-test \ + ssve-test \ + za-test \ vlset - -all: $(TEST_GEN_PROGS) $(TEST_PROGS_EXTENDED) +TEST_PROGS_EXTENDED := fpsimd-stress sve-stress ssve-stress za-stress # Build with nolibc to avoid effects due to libc's clone() support fp-pidbench: fp-pidbench.S asm-utils.o From 3a23a42d1a480095e5e6ab820594f194079b6a61 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 27 Apr 2022 19:19:52 +0100 Subject: [PATCH 058/145] selftests/arm64: Define top_srcdir for the fp tests Some of the rules in lib.mk use a top_srcdir variable to figure out where the top of the kselftest tree is, provide it. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220427181954.357975-3-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index a0b8cc59947e..ba758a6c6b9a 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -1,6 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 -CFLAGS += -I../../../../../usr/include/ +# A proper top_srcdir is needed by KSFT(lib.mk) +top_srcdir = $(realpath ../../../../../) + +CFLAGS += -I$(top_srcdir)/usr/include/ + TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg za-fork za-ptrace TEST_GEN_PROGS_EXTENDED := fp-pidbench fpsimd-test \ rdvl-sme rdvl-sve \ From 399cf0a3e8a1a2cf93e87017282e682e7b65f01c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 27 Apr 2022 19:19:53 +0100 Subject: [PATCH 059/145] selftests/arm64: Clean the fp helper libraries We provide a couple of object files with helpers linked into several of the test programs, ensure they are cleaned. 
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220427181954.357975-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index ba758a6c6b9a..7e5d48c4a59d 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -14,6 +14,8 @@ TEST_GEN_PROGS_EXTENDED := fp-pidbench fpsimd-test \ vlset TEST_PROGS_EXTENDED := fpsimd-stress sve-stress ssve-stress za-stress +EXTRA_CLEAN += $(OUTPUT)/asm-utils.o $(OUTPUT)/rdvl.o $(OUTPUT)/za-fork-asm.o + # Build with nolibc to avoid effects due to libc's clone() support fp-pidbench: fp-pidbench.S asm-utils.o $(CC) -nostdlib $^ -o $@ From aca43ad51661d46b0083614a5b75b6cb90c30741 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 27 Apr 2022 19:19:54 +0100 Subject: [PATCH 060/145] selftests/arm64: Fix O= builds for the floating point tests Currently the arm64 floating point tests don't support out of tree builds due to two quirks of the kselftest build system. One is that when building a program from multiple files we shouldn't separately compile the main program to an object file as that will result in the pattern rule not matching when adjusted for the output directory. The other is that we also need to include $(OUTPUT) in the names of the binaries when specifying the dependencies in order to ensure that they get picked up with O=. Rewrite the dependencies for the executables to fix these issues. The kselftest build system will ensure OUTPUT is always defined. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220427181954.357975-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/Makefile | 32 +++++++++++------------ 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index 7e5d48c4a59d..a7c2286bf65b 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -17,28 +17,26 @@ TEST_PROGS_EXTENDED := fpsimd-stress sve-stress ssve-stress za-stress EXTRA_CLEAN += $(OUTPUT)/asm-utils.o $(OUTPUT)/rdvl.o $(OUTPUT)/za-fork-asm.o # Build with nolibc to avoid effects due to libc's clone() support -fp-pidbench: fp-pidbench.S asm-utils.o +$(OUTPUT)/fp-pidbench: fp-pidbench.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ -fpsimd-test: fpsimd-test.o asm-utils.o +$(OUTPUT)/fpsimd-test: fpsimd-test.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ -rdvl-sme: rdvl-sme.o rdvl.o -rdvl-sve: rdvl-sve.o rdvl.o -sve-ptrace: sve-ptrace.o -sve-probe-vls: sve-probe-vls.o rdvl.o -sve-test: sve-test.o asm-utils.o +$(OUTPUT)/rdvl-sve: rdvl-sve.c $(OUTPUT)/rdvl.o +$(OUTPUT)/rdvl-sme: rdvl-sme.c $(OUTPUT)/rdvl.o +$(OUTPUT)/sve-ptrace: sve-ptrace.c +$(OUTPUT)/sve-probe-vls: sve-probe-vls.c $(OUTPUT)/rdvl.o +$(OUTPUT)/sve-test: sve-test.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ -ssve-test: sve-test.S asm-utils.o +$(OUTPUT)/ssve-test: sve-test.S $(OUTPUT)/asm-utils.o $(CC) -DSSVE -nostdlib $^ -o $@ -vec-syscfg: vec-syscfg.o rdvl.o -vlset: vlset.o -za-fork: za-fork.o za-fork-asm.o - $(CC) -nostdlib -static $^ -o $@ -lgcc -za-fork.o: za-fork.c - $(CC) -c -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ +$(OUTPUT)/vec-syscfg: vec-syscfg.c $(OUTPUT)/rdvl.o +$(OUTPUT)/vlset: vlset.c +$(OUTPUT)/za-fork: za-fork.c $(OUTPUT)/za-fork-asm.o + $(CC) 
-fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ -include ../../../../include/nolibc/nolibc.h \ - -ffreestanding -Wall $^ -o $@ -za-test: za-test.o asm-utils.o + -static -ffreestanding -Wall $^ -o $@ +$(OUTPUT)/za-ptrace: za-ptrace.c +$(OUTPUT)/za-test: za-test.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ -za-ptrace: za-ptrace.o include ../../lib.mk From 2e29b9971ac54dec88baa58856a230ec2f2a2dff Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Tue, 26 Apr 2022 19:30:53 +0800 Subject: [PATCH 061/145] arm64/sme: Fix NULL check after kzalloc Fix following coccicheck error: ./arch/arm64/kernel/process.c:322:2-23: alloc with no test, possible model on line 326 Here should be dst->thread.sve_state. Fixes: 8bd7f91c03d8 ("arm64/sme: Implement traps and syscall handling for SME") Signed-off-by: Wan Jiabing Reviwed-by: Mark Brown Link: https://lore.kernel.org/r/20220426113054.630983-1-wanjiabing@vivo.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/process.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 99c293513817..9734c9fb1a32 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -321,7 +321,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) if (thread_za_enabled(&src->thread)) { dst->thread.sve_state = kzalloc(sve_state_size(src), GFP_KERNEL); - if (!dst->thread.za_state) + if (!dst->thread.sve_state) return -ENOMEM; dst->thread.za_state = kmemdup(src->thread.za_state, za_state_size(src), From e999995c84c3abb6dcae83f8c35942a8d4ee0451 Mon Sep 17 00:00:00 2001 From: Chengming Zhou Date: Thu, 21 Apr 2022 00:00:05 +0800 Subject: [PATCH 062/145] ftrace: cleanup ftrace_graph_caller enable and disable The ftrace_[enable,disable]_ftrace_graph_caller() are used to do special hooks for graph tracer, which are not needed on some ARCHs that use graph_ops:func function to install return_hooker. So introduce the weak version in ftrace core code to cleanup in x86. 
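The mechanism being relied on is the linker's weak-symbol override; a minimal, self-contained illustration of the pattern (generic C with an illustrative function name, not the kernel code):

  #include <stdio.h>

  /* Core code provides a no-op default... */
  int __attribute__((weak)) enable_graph_caller(void)
  {
  	return 0;
  }

  /*
   * ...and an architecture that needs the hook links in a strong
   * definition with the same name, which the linker prefers:
   *
   *	int enable_graph_caller(void) { return patch_branch_site(); }
   */
  int main(void)
  {
  	printf("enable_graph_caller() = %d\n", enable_graph_caller());
  	return 0;
  }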
Signed-off-by: Chengming Zhou Acked-by: Steven Rostedt (Google) Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220420160006.17880-1-zhouchengming@bytedance.com Signed-off-by: Catalin Marinas --- arch/x86/kernel/ftrace.c | 17 ++--------------- kernel/trace/fgraph.c | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 1e31c7d21597..b09d73c2ba89 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -579,9 +579,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops) #ifdef CONFIG_FUNCTION_GRAPH_TRACER -#ifdef CONFIG_DYNAMIC_FTRACE - -#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) extern void ftrace_graph_call(void); static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) { @@ -610,18 +608,7 @@ int ftrace_disable_ftrace_graph_caller(void) return ftrace_mod_jmp(ip, &ftrace_stub); } -#else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ -int ftrace_enable_ftrace_graph_caller(void) -{ - return 0; -} - -int ftrace_disable_ftrace_graph_caller(void) -{ - return 0; -} -#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ -#endif /* !CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ /* * Hook the return address and push it in the stack of return addrs diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index 8f4fb328133a..289311680c29 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -30,6 +30,24 @@ int ftrace_graph_active; /* Both enabled by default (can be cleared by function_graph tracer flags */ static bool fgraph_sleep_time = true; +/* + * archs can override this function if they must do something + * to enable hook for graph tracer. + */ +int __weak ftrace_enable_ftrace_graph_caller(void) +{ + return 0; +} + +/* + * archs can override this function if they must do something + * to disable hook for graph tracer. + */ +int __weak ftrace_disable_ftrace_graph_caller(void) +{ + return 0; +} + /** * ftrace_graph_stop - set to permanently disable function graph tracing * From c4a0ebf87cebbfa28d56e7d93b2536e2311e30c9 Mon Sep 17 00:00:00 2001 From: Chengming Zhou Date: Thu, 21 Apr 2022 00:00:06 +0800 Subject: [PATCH 063/145] arm64/ftrace: Make function graph use ftrace directly As we do in commit 0c0593b45c9b ("x86/ftrace: Make function graph use ftrace directly"), we don't need special hook for graph tracer, but instead we use graph_ops:func function to install return_hooker. Since commit 3b23e4991fb6 ("arm64: implement ftrace with regs") add implementation for FTRACE_WITH_REGS on arm64, we can easily adopt the same cleanup on arm64. And this cleanup only changes the FTRACE_WITH_REGS implementation, so the mcount-based implementation is unaffected. While in theory it would be possible to make a similar cleanup for !FTRACE_WITH_REGS, this will require rework of the core code, and so for now we only change the FTRACE_WITH_REGS implementation. 
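For background, "installing return_hooker" means rewriting the saved link register of the traced function so that it returns into the graph tracer's trampoline, which reports the function exit and then branches to the real return address; a rough sketch of the idea (names are illustrative, not the kernel implementation):

  /* Illustration only: return_trampoline() and record_return_target()
   * are stand-ins, not kernel symbols. */
  extern void return_trampoline(void);
  void record_return_target(unsigned long callsite, unsigned long ret,
  			  unsigned long fp);

  static void install_return_hook(unsigned long *saved_lr,
  				unsigned long callsite, unsigned long fp)
  {
  	unsigned long real_ret = *saved_lr;

  	/* Remember where the function really intended to return... */
  	record_return_target(callsite, real_ret, fp);

  	/* ...then divert its return into the tracer's trampoline. */
  	*saved_lr = (unsigned long)return_trampoline;
  }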
Tested-by: Mark Rutland Reviewed-by: Mark Rutland Signed-off-by: Chengming Zhou Link: https://lore.kernel.org/r/20220420160006.17880-2-zhouchengming@bytedance.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/ftrace.h | 7 +++++++ arch/arm64/kernel/entry-ftrace.S | 17 ----------------- arch/arm64/kernel/ftrace.c | 17 +++++++++++++++++ 3 files changed, 24 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index 1494cfa8639b..dbc45a4157fa 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -80,8 +80,15 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS struct dyn_ftrace; +struct ftrace_ops; +struct ftrace_regs; + int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop + +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs); +#define ftrace_graph_func ftrace_graph_func #endif #define ftrace_return_address(n) return_address(n) diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index e535480a4069..d42a205ef625 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -97,12 +97,6 @@ SYM_CODE_START(ftrace_common) SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) bl ftrace_stub -#ifdef CONFIG_FUNCTION_GRAPH_TRACER -SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller(); - nop // If enabled, this will be replaced - // "b ftrace_graph_caller" -#endif - /* * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved * x19-x29 per the AAPCS, and we created frame records upon entry, so we need @@ -127,17 +121,6 @@ ftrace_common_return: ret x9 SYM_CODE_END(ftrace_common) -#ifdef CONFIG_FUNCTION_GRAPH_TRACER -SYM_CODE_START(ftrace_graph_caller) - ldr x0, [sp, #S_PC] - sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn) - add x1, sp, #S_LR // parent_ip (callsite's LR) - ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP) - bl prepare_ftrace_return - b ftrace_common_return -SYM_CODE_END(ftrace_graph_caller) -#endif - #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ /* diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 4506c4a90ac1..f447c4a36f69 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -268,6 +268,22 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, } #ifdef CONFIG_DYNAMIC_FTRACE + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs) +{ + /* + * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL + * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs + * in which we can safely modify the LR. + */ + struct pt_regs *regs = arch_ftrace_get_regs(fregs); + unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs); + + prepare_ftrace_return(ip, parent, frame_pointer(regs)); +} +#else /* * Turn on/off the call to ftrace_graph_caller() in ftrace_caller() * depending on @enable. 
@@ -297,5 +313,6 @@ int ftrace_disable_ftrace_graph_caller(void) { return ftrace_modify_graph_caller(false); } +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ From a99ef9cb4b79e79d1574804eaf39608f8187e174 Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Mon, 25 Apr 2022 12:44:40 +0100 Subject: [PATCH 064/145] arm64: Make ESR_ELx_xVC_IMM_MASK compatible with assembly ESR_ELx_xVC_IMM_MASK is used as a mask for the immediate value for the HVC/SMC instructions. The header file is included by assembly files (like entry.S) and ESR_ELx_xVC_IMM_MASK is not conditioned on __ASSEMBLY__ being undefined. Use the UL() macro for defining the constant's size, as that is compatible with both C code and assembly, whereas the UL suffix only works for C code. Signed-off-by: Alexandru Elisei Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20220425114444.368693-2-alexandru.elisei@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/esr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index d52a0b269ee8..7356e2f05755 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -136,7 +136,7 @@ #define ESR_ELx_WFx_ISS_TI (UL(1) << 0) #define ESR_ELx_WFx_ISS_WFI (UL(0) << 0) #define ESR_ELx_WFx_ISS_WFE (UL(1) << 0) -#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1) +#define ESR_ELx_xVC_IMM_MASK ((UL(1) << 16) - 1) #define DISR_EL1_IDS (UL(1) << 24) /* From 3fed9e551417b84038b15117732ea4505eee386b Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Mon, 25 Apr 2022 12:44:41 +0100 Subject: [PATCH 065/145] arm64: compat: Do not treat syscall number as ESR_ELx for a bad syscall If a compat process tries to execute an unknown system call above the __ARM_NR_COMPAT_END number, the kernel sends a SIGILL signal to the offending process. Information about the error is printed to dmesg in compat_arm_syscall() -> arm64_notify_die() -> arm64_force_sig_fault() -> arm64_show_signal(). arm64_show_signal() interprets a non-zero value for current->thread.fault_code as an exception syndrome and displays the message associated with the ESR_ELx.EC field (bits 31:26). current->thread.fault_code is set in compat_arm_syscall() -> arm64_notify_die() with the bad syscall number instead of a valid ESR_ELx value. This means that the ESR_ELx.EC field has the value that the user set for the syscall number and the kernel can end up printing bogus exception messages*. For example, for the syscall number 0x68000000, which evaluates to ESR_ELx.EC value of 0x1A (ESR_ELx_EC_FPAC) the kernel prints this error: [ 18.349161] syscall[300]: unhandled exception: ERET/ERETAA/ERETAB, ESR 0x68000000, Oops - bad compat syscall(2) in syscall[10000+50000] [ 18.350639] CPU: 2 PID: 300 Comm: syscall Not tainted 5.18.0-rc1 #79 [ 18.351249] Hardware name: Pine64 RockPro64 v2.0 (DT) [..] which is misleading, as the bad compat syscall has nothing to do with pointer authentication. Stop arm64_show_signal() from printing exception syndrome information by having compat_arm_syscall() set the ESR_ELx value to 0, as it has no meaning for an invalid system call number. The example above now becomes: [ 19.935275] syscall[301]: unhandled exception: Oops - bad compat syscall(2) in syscall[10000+50000] [ 19.936124] CPU: 1 PID: 301 Comm: syscall Not tainted 5.18.0-rc1-00005-g7e08006d4102 #80 [ 19.936894] Hardware name: Pine64 RockPro64 v2.0 (DT) [..] 
which although shows less information because the syscall number, wrongfully advertised as the ESR value, is missing, it is better than showing plainly wrong information. The syscall number can be easily obtained with strace. *A 32-bit value above or equal to 0x8000_0000 is interpreted as a negative integer in compat_arm_syscal() and the condition scno < __ARM_NR_COMPAT_END evaluates to true; the syscall will exit to userspace in this case with the ENOSYS error code instead of arm64_notify_die() being called. Signed-off-by: Alexandru Elisei Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20220425114444.368693-3-alexandru.elisei@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/sys_compat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 12c6864e51e1..df14336c3a29 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -113,6 +113,6 @@ long compat_arm_syscall(struct pt_regs *regs, int scno) addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4); arm64_notify_die("Oops - bad compat syscall(2)", regs, - SIGILL, ILL_ILLTRP, addr, scno); + SIGILL, ILL_ILLTRP, addr, 0); return 0; } From 8d56e5c5a99ce1d17d39ce5a8260e42c2a2d7682 Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Mon, 25 Apr 2022 12:44:42 +0100 Subject: [PATCH 066/145] arm64: Treat ESR_ELx as a 64-bit register In the initial release of the ARM Architecture Reference Manual for ARMv8-A, the ESR_ELx registers were defined as 32-bit registers. This changed in 2018 with version D.a (ARM DDI 0487D.a) of the architecture, when they became 64-bit registers, with bits [63:32] defined as RES0. In version G.a, a new field was added to ESR_ELx, ISS2, which covers bits [36:32]. This field is used when the Armv8.7 extension FEAT_LS64 is implemented. As a result of the evolution of the register width, Linux stores it as both a 64-bit value and a 32-bit value, which hasn't affected correctness so far as Linux only uses the lower 32 bits of the register. Make the register type consistent and always treat it as 64-bit wide. The register is redefined as an "unsigned long", which is an unsigned double-word (64-bit quantity) for the LP64 machine (aapcs64 [1], Table 1, page 14). The type was chosen because "unsigned int" is the most frequent type for ESR_ELx and because FAR_ELx, which is used together with ESR_ELx in exception handling, is also declared as "unsigned long". The 64-bit type also makes adding support for architectural features that use fields above bit 31 easier in the future. The KVM hypervisor will receive a similar update in a subsequent patch. 
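To illustrate why the wider type matters, extracting the ISS2 field described above (bits [36:32]) is only possible from a value that has not been truncated to 32 bits; a small sketch with illustrative macro names (not the kernel's definitions):

  /* ISS2 occupies ESR_ELx bits [36:32] per the description above. */
  #define ILL_ESR_ISS2_SHIFT	32
  #define ILL_ESR_ISS2_MASK	(0x1fUL << ILL_ESR_ISS2_SHIFT)

  static inline unsigned long esr_iss2(unsigned long esr)
  {
  	return (esr & ILL_ESR_ISS2_MASK) >> ILL_ESR_ISS2_SHIFT;
  }

  /* Had esr been stored as a 32-bit quantity, bits [63:32] would already
   * have been dropped and esr_iss2() could only ever return 0. */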
[1] https://github.com/ARM-software/abi-aa/releases/download/2021Q3/aapcs64.pdf Signed-off-by: Alexandru Elisei Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20220425114444.368693-4-alexandru.elisei@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/debug-monitors.h | 4 +- arch/arm64/include/asm/esr.h | 6 +-- arch/arm64/include/asm/exception.h | 28 +++++----- arch/arm64/include/asm/system_misc.h | 4 +- arch/arm64/include/asm/traps.h | 12 ++--- arch/arm64/kernel/debug-monitors.c | 12 ++--- arch/arm64/kernel/entry-common.c | 6 +-- arch/arm64/kernel/fpsimd.c | 6 +-- arch/arm64/kernel/hw_breakpoint.c | 4 +- arch/arm64/kernel/kgdb.c | 6 +-- arch/arm64/kernel/probes/kprobes.c | 4 +- arch/arm64/kernel/probes/uprobes.c | 4 +- arch/arm64/kernel/traps.c | 66 +++++++++++------------ arch/arm64/mm/fault.c | 70 ++++++++++++------------- 14 files changed, 116 insertions(+), 116 deletions(-) diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 00c291067e57..7b7e05c02691 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -64,7 +64,7 @@ struct task_struct; struct step_hook { struct list_head node; - int (*fn)(struct pt_regs *regs, unsigned int esr); + int (*fn)(struct pt_regs *regs, unsigned long esr); }; void register_user_step_hook(struct step_hook *hook); @@ -75,7 +75,7 @@ void unregister_kernel_step_hook(struct step_hook *hook); struct break_hook { struct list_head node; - int (*fn)(struct pt_regs *regs, unsigned int esr); + int (*fn)(struct pt_regs *regs, unsigned long esr); u16 imm; u16 mask; /* These bits are ignored when comparing with imm */ }; diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 7356e2f05755..9d18f82c57d5 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -330,14 +330,14 @@ #ifndef __ASSEMBLY__ #include -static inline bool esr_is_data_abort(u32 esr) +static inline bool esr_is_data_abort(unsigned long esr) { - const u32 ec = ESR_ELx_EC(esr); + const unsigned long ec = ESR_ELx_EC(esr); return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR; } -const char *esr_get_class_string(u32 esr); +const char *esr_get_class_string(unsigned long esr); #endif /* __ASSEMBLY */ #endif /* __ASM_ESR_H */ diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 339477dca551..0e6535aa78c2 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -19,9 +19,9 @@ #define __exception_irq_entry __kprobes #endif -static inline u32 disr_to_esr(u64 disr) +static inline unsigned long disr_to_esr(u64 disr) { - unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT; + unsigned long esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT; if ((disr & DISR_EL1_IDS) == 0) esr |= (disr & DISR_EL1_ESR_MASK); @@ -57,23 +57,23 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs, void (*func)(struct pt_regs *)); asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs); -void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs); +void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs); void do_undefinstr(struct pt_regs *regs); void do_bti(struct pt_regs *regs); -void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, +void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, struct pt_regs *regs); -void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs); -void do_sve_acc(unsigned int esr, 
struct pt_regs *regs); -void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs); -void do_sysinstr(unsigned int esr, struct pt_regs *regs); -void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); -void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); -void do_cp15instr(unsigned int esr, struct pt_regs *regs); +void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs); +void do_sve_acc(unsigned long esr, struct pt_regs *regs); +void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs); +void do_sysinstr(unsigned long esr, struct pt_regs *regs); +void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs); +void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr); +void do_cp15instr(unsigned long esr, struct pt_regs *regs); void do_el0_svc(struct pt_regs *regs); void do_el0_svc_compat(struct pt_regs *regs); -void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr); -void do_serror(struct pt_regs *regs, unsigned int esr); +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr); +void do_serror(struct pt_regs *regs, unsigned long esr); void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); -void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far); +void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far); #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h index 305a7157c6a6..0eb7709422e2 100644 --- a/arch/arm64/include/asm/system_misc.h +++ b/arch/arm64/include/asm/system_misc.h @@ -23,9 +23,9 @@ void die(const char *msg, struct pt_regs *regs, int err); struct siginfo; void arm64_notify_die(const char *str, struct pt_regs *regs, int signo, int sicode, unsigned long far, - int err); + unsigned long err); -void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int, +void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned long, struct pt_regs *), int sig, int code, const char *name); diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index 54f32a0675df..6e5826470bea 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -24,7 +24,7 @@ struct undef_hook { void register_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook); -void force_signal_inject(int signal, int code, unsigned long address, unsigned int err); +void force_signal_inject(int signal, int code, unsigned long address, unsigned long err); void arm64_notify_segfault(unsigned long addr); void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str); void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str); @@ -57,7 +57,7 @@ static inline int in_entry_text(unsigned long ptr) * errors share the same encoding as an all-zeros encoding from a CPU that * doesn't support RAS. */ -static inline bool arm64_is_ras_serror(u32 esr) +static inline bool arm64_is_ras_serror(unsigned long esr) { WARN_ON(preemptible()); @@ -77,9 +77,9 @@ static inline bool arm64_is_ras_serror(u32 esr) * We treat them as Uncontainable. * Non-RAS SError's are reported as Uncontained/Uncategorized. 
*/ -static inline u32 arm64_ras_serror_get_severity(u32 esr) +static inline unsigned long arm64_ras_serror_get_severity(unsigned long esr) { - u32 aet = esr & ESR_ELx_AET; + unsigned long aet = esr & ESR_ELx_AET; if (!arm64_is_ras_serror(esr)) { /* Not a RAS error, we can't interpret the ESR. */ @@ -98,6 +98,6 @@ static inline u32 arm64_ras_serror_get_severity(u32 esr) return aet; } -bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr); -void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr); +bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr); +void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr); #endif diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 4f3661eeb7ec..bf9fe71589bc 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -202,7 +202,7 @@ void unregister_kernel_step_hook(struct step_hook *hook) * So we call all the registered handlers, until the right handler is * found which returns zero. */ -static int call_step_hook(struct pt_regs *regs, unsigned int esr) +static int call_step_hook(struct pt_regs *regs, unsigned long esr) { struct step_hook *hook; struct list_head *list; @@ -238,7 +238,7 @@ static void send_user_sigtrap(int si_code) "User debug trap"); } -static int single_step_handler(unsigned long unused, unsigned int esr, +static int single_step_handler(unsigned long unused, unsigned long esr, struct pt_regs *regs) { bool handler_found = false; @@ -299,11 +299,11 @@ void unregister_kernel_break_hook(struct break_hook *hook) unregister_debug_hook(&hook->node); } -static int call_break_hook(struct pt_regs *regs, unsigned int esr) +static int call_break_hook(struct pt_regs *regs, unsigned long esr) { struct break_hook *hook; struct list_head *list; - int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; + int (*fn)(struct pt_regs *regs, unsigned long esr) = NULL; list = user_mode(regs) ? &user_break_hook : &kernel_break_hook; @@ -312,7 +312,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) * entirely not preemptible, and we can use rcu list safely here. 
*/ list_for_each_entry_rcu(hook, list, node) { - unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; + unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; if ((comment & ~hook->mask) == hook->imm) fn = hook->fn; @@ -322,7 +322,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) } NOKPROBE_SYMBOL(call_break_hook); -static int brk_handler(unsigned long unused, unsigned int esr, +static int brk_handler(unsigned long unused, unsigned long esr, struct pt_regs *regs) { if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 878c65aa7206..6ba10edfb49c 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -282,13 +282,13 @@ extern void (*handle_arch_irq)(struct pt_regs *); extern void (*handle_arch_fiq)(struct pt_regs *); static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector, - unsigned int esr) + unsigned long esr) { arm64_enter_nmi(regs); console_verbose(); - pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n", + pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n", vector, smp_processor_id(), esr, esr_get_class_string(esr)); @@ -818,7 +818,7 @@ UNHANDLED(el0t, 32, error) #ifdef CONFIG_VMAP_STACK asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs) { - unsigned int esr = read_sysreg(esr_el1); + unsigned long esr = read_sysreg(esr_el1); unsigned long far = read_sysreg(far_el1); arm64_enter_nmi(regs); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 47af76e53221..22bf0cfe236b 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1004,7 +1004,7 @@ void fpsimd_release_task(struct task_struct *dead_task) * would have disabled the SVE access trap for userspace during * ret_to_user, making an SVE access trap impossible in that case. */ -void do_sve_acc(unsigned int esr, struct pt_regs *regs) +void do_sve_acc(unsigned long esr, struct pt_regs *regs) { /* Even if we chose not to use SVE, the hardware could still trap: */ if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { @@ -1046,7 +1046,7 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs) /* * Trapped FP/ASIMD access. */ -void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) +void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs) { /* TODO: implement lazy context saving/restoring */ WARN_ON(1); @@ -1055,7 +1055,7 @@ void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) /* * Raise a SIGFPE for the current process. */ -void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) +void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs) { unsigned int si_code = FPE_FLTUNK; diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index cd868084e724..b29a311bb055 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -617,7 +617,7 @@ NOKPROBE_SYMBOL(toggle_bp_registers); /* * Debug exception handlers. 
*/ -static int breakpoint_handler(unsigned long unused, unsigned int esr, +static int breakpoint_handler(unsigned long unused, unsigned long esr, struct pt_regs *regs) { int i, step = 0, *kernel_step; @@ -751,7 +751,7 @@ static int watchpoint_report(struct perf_event *wp, unsigned long addr, return step; } -static int watchpoint_handler(unsigned long addr, unsigned int esr, +static int watchpoint_handler(unsigned long addr, unsigned long esr, struct pt_regs *regs) { int i, step = 0, *kernel_step, access, closest_match = 0; diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 2aede780fb80..cda9c1e9864f 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -232,14 +232,14 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, return err; } -static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) +static int kgdb_brk_fn(struct pt_regs *regs, unsigned long esr) { kgdb_handle_exception(1, SIGTRAP, 0, regs); return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_brk_fn) -static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) +static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned long esr) { compiled_break = 1; kgdb_handle_exception(1, SIGTRAP, 0, regs); @@ -248,7 +248,7 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) } NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); -static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) +static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr) { if (!kgdb_single_step) return DBG_HOOK_ERROR; diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index d9dfa82c1f18..d1d182320245 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -335,7 +335,7 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) } static int __kprobes -kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr) +kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long addr = instruction_pointer(regs); @@ -359,7 +359,7 @@ static struct break_hook kprobes_break_ss_hook = { }; static int __kprobes -kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) +kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr) { kprobe_handler(regs); return DBG_HOOK_HANDLED; diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index 9be668f3f034..d49aef2657cd 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -166,7 +166,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, } static int uprobe_breakpoint_handler(struct pt_regs *regs, - unsigned int esr) + unsigned long esr) { if (uprobe_pre_sstep_notifier(regs)) return DBG_HOOK_HANDLED; @@ -175,7 +175,7 @@ static int uprobe_breakpoint_handler(struct pt_regs *regs, } static int uprobe_single_step_handler(struct pt_regs *regs, - unsigned int esr) + unsigned long esr) { struct uprobe_task *utask = current->utask; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 0529fd57567e..da24a4c4f58b 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -242,7 +242,7 @@ static void arm64_show_signal(int signo, const char *str) static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); struct task_struct *tsk = current; - unsigned int esr = tsk->thread.fault_code; + unsigned long esr = tsk->thread.fault_code; struct pt_regs *regs 
= task_pt_regs(tsk); /* Leave if the signal won't be shown */ @@ -253,7 +253,7 @@ static void arm64_show_signal(int signo, const char *str) pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk)); if (esr) - pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr); + pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr); pr_cont("%s", str); print_vma_addr(KERN_CONT " in ", regs->pc); @@ -287,7 +287,7 @@ void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, void arm64_notify_die(const char *str, struct pt_regs *regs, int signo, int sicode, unsigned long far, - int err) + unsigned long err) { if (user_mode(regs)) { WARN_ON(regs != current_pt_regs()); @@ -439,7 +439,7 @@ exit: return fn ? fn(regs, instr) : 1; } -void force_signal_inject(int signal, int code, unsigned long address, unsigned int err) +void force_signal_inject(int signal, int code, unsigned long address, unsigned long err) { const char *desc; struct pt_regs *regs = current_pt_regs(); @@ -506,7 +506,7 @@ void do_bti(struct pt_regs *regs) } NOKPROBE_SYMBOL(do_bti); -void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr) +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr) { /* * Unexpected FPAC exception or pointer authentication failure in @@ -532,7 +532,7 @@ NOKPROBE_SYMBOL(do_ptrauth_fault); uaccess_ttbr0_disable(); \ } -static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) +static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs) { unsigned long tagged_address, address; int rt = ESR_ELx_SYS64_ISS_RT(esr); @@ -572,7 +572,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } -static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) +static void ctr_read_handler(unsigned long esr, struct pt_regs *regs) { int rt = ESR_ELx_SYS64_ISS_RT(esr); unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); @@ -591,7 +591,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } -static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) +static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs) { int rt = ESR_ELx_SYS64_ISS_RT(esr); @@ -599,7 +599,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } -static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) +static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) { int rt = ESR_ELx_SYS64_ISS_RT(esr); @@ -607,7 +607,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } -static void mrs_handler(unsigned int esr, struct pt_regs *regs) +static void mrs_handler(unsigned long esr, struct pt_regs *regs) { u32 sysreg, rt; @@ -618,15 +618,15 @@ static void mrs_handler(unsigned int esr, struct pt_regs *regs) force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); } -static void wfi_handler(unsigned int esr, struct pt_regs *regs) +static void wfi_handler(unsigned long esr, struct pt_regs *regs) { arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } struct sys64_hook { - unsigned int esr_mask; - unsigned int esr_val; - void (*handler)(unsigned int esr, struct pt_regs *regs); + unsigned long esr_mask; + unsigned long esr_val; + void (*handler)(unsigned long esr, struct pt_regs *regs); }; 
static const struct sys64_hook sys64_hooks[] = { @@ -675,7 +675,7 @@ static const struct sys64_hook sys64_hooks[] = { }; #ifdef CONFIG_COMPAT -static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) +static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs) { int cond; @@ -695,7 +695,7 @@ static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) return aarch32_opcode_cond_checks[cond](regs->pstate); } -static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) +static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) { int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT; @@ -712,7 +712,7 @@ static const struct sys64_hook cp15_32_hooks[] = { {}, }; -static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs) +static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs) { int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT; int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT; @@ -737,7 +737,7 @@ static const struct sys64_hook cp15_64_hooks[] = { {}, }; -void do_cp15instr(unsigned int esr, struct pt_regs *regs) +void do_cp15instr(unsigned long esr, struct pt_regs *regs) { const struct sys64_hook *hook, *hook_base; @@ -778,7 +778,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs) NOKPROBE_SYMBOL(do_cp15instr); #endif -void do_sysinstr(unsigned int esr, struct pt_regs *regs) +void do_sysinstr(unsigned long esr, struct pt_regs *regs) { const struct sys64_hook *hook; @@ -842,7 +842,7 @@ static const char *esr_class_str[] = { [ESR_ELx_EC_BRK64] = "BRK (AArch64)", }; -const char *esr_get_class_string(u32 esr) +const char *esr_get_class_string(unsigned long esr) { return esr_class_str[ESR_ELx_EC(esr)]; } @@ -851,7 +851,7 @@ const char *esr_get_class_string(u32 esr) * bad_el0_sync handles unexpected, but potentially recoverable synchronous * exceptions taken from EL0. 
*/ -void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) +void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr) { unsigned long pc = instruction_pointer(regs); @@ -867,7 +867,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack) __aligned(16); -void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) +void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far) { unsigned long tsk_stk = (unsigned long)current->stack; unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr); @@ -876,7 +876,7 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) console_verbose(); pr_emerg("Insufficient stack space to handle exception!"); - pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr)); + pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr)); pr_emerg("FAR: 0x%016lx\n", far); pr_emerg("Task stack: [0x%016lx..0x%016lx]\n", @@ -897,11 +897,11 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) } #endif -void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr) +void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr) { console_verbose(); - pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n", + pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n", smp_processor_id(), esr, esr_get_class_string(esr)); if (regs) __show_regs(regs); @@ -912,9 +912,9 @@ void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr) unreachable(); } -bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) +bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr) { - u32 aet = arm64_ras_serror_get_severity(esr); + unsigned long aet = arm64_ras_serror_get_severity(esr); switch (aet) { case ESR_ELx_AET_CE: /* corrected error */ @@ -944,7 +944,7 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) } } -void do_serror(struct pt_regs *regs, unsigned int esr) +void do_serror(struct pt_regs *regs, unsigned long esr) { /* non-RAS errors are not containable */ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr)) @@ -965,7 +965,7 @@ int is_valid_bugaddr(unsigned long addr) return 1; } -static int bug_handler(struct pt_regs *regs, unsigned int esr) +static int bug_handler(struct pt_regs *regs, unsigned long esr) { switch (report_bug(regs->pc, regs)) { case BUG_TRAP_TYPE_BUG: @@ -990,7 +990,7 @@ static struct break_hook bug_break_hook = { .imm = BUG_BRK_IMM, }; -static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr) +static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr) { pr_err("%s generated an invalid instruction at %pS!\n", "Kernel text patching", @@ -1012,7 +1012,7 @@ static struct break_hook fault_break_hook = { #define KASAN_ESR_SIZE_MASK 0x0f #define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK)) -static int kasan_handler(struct pt_regs *regs, unsigned int esr) +static int kasan_handler(struct pt_regs *regs, unsigned long esr) { bool recover = esr & KASAN_ESR_RECOVER; bool write = esr & KASAN_ESR_WRITE; @@ -1055,11 +1055,11 @@ static struct break_hook kasan_break_hook = { * Initial handler for AArch64 BRK exceptions * This handler only used until debug_traps_init(). 
*/ -int __init early_brk64(unsigned long addr, unsigned int esr, +int __init early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs) { #ifdef CONFIG_KASAN_SW_TAGS - unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; + unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM) return kasan_handler(regs, esr) != DBG_HOOK_HANDLED; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 77341b160aca..24f9b43bc18e 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -43,7 +43,7 @@ #include struct fault_info { - int (*fn)(unsigned long far, unsigned int esr, + int (*fn)(unsigned long far, unsigned long esr, struct pt_regs *regs); int sig; int code; @@ -53,17 +53,17 @@ struct fault_info { static const struct fault_info fault_info[]; static struct fault_info debug_fault_info[]; -static inline const struct fault_info *esr_to_fault_info(unsigned int esr) +static inline const struct fault_info *esr_to_fault_info(unsigned long esr) { return fault_info + (esr & ESR_ELx_FSC); } -static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr) +static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr) { return debug_fault_info + DBG_ESR_EVT(esr); } -static void data_abort_decode(unsigned int esr) +static void data_abort_decode(unsigned long esr) { pr_alert("Data abort info:\n"); @@ -85,11 +85,11 @@ static void data_abort_decode(unsigned int esr) (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); } -static void mem_abort_decode(unsigned int esr) +static void mem_abort_decode(unsigned long esr) { pr_alert("Mem abort info:\n"); - pr_alert(" ESR = 0x%08x\n", esr); + pr_alert(" ESR = 0x%016lx\n", esr); pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n", ESR_ELx_EC(esr), esr_get_class_string(esr), (esr & ESR_ELx_IL) ? 
32 : 16); @@ -99,7 +99,7 @@ static void mem_abort_decode(unsigned int esr) pr_alert(" EA = %lu, S1PTW = %lu\n", (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); - pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC), + pr_alert(" FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC), esr_to_fault_info(esr)->name); if (esr_is_data_abort(esr)) @@ -229,20 +229,20 @@ int ptep_set_access_flags(struct vm_area_struct *vma, return 1; } -static bool is_el1_instruction_abort(unsigned int esr) +static bool is_el1_instruction_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; } -static bool is_el1_data_abort(unsigned int esr) +static bool is_el1_data_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR; } -static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, +static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { - unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; + unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE; if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr)) return false; @@ -258,7 +258,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, } static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, - unsigned int esr, + unsigned long esr, struct pt_regs *regs) { unsigned long flags; @@ -290,7 +290,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, } static void die_kernel_fault(const char *msg, unsigned long addr, - unsigned int esr, struct pt_regs *regs) + unsigned long esr, struct pt_regs *regs) { bust_spinlocks(1); @@ -308,7 +308,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr, } #ifdef CONFIG_KASAN_HW_TAGS -static void report_tag_fault(unsigned long addr, unsigned int esr, +static void report_tag_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { /* @@ -320,11 +320,11 @@ static void report_tag_fault(unsigned long addr, unsigned int esr, } #else /* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */ -static inline void report_tag_fault(unsigned long addr, unsigned int esr, +static inline void report_tag_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { } #endif -static void do_tag_recovery(unsigned long addr, unsigned int esr, +static void do_tag_recovery(unsigned long addr, unsigned long esr, struct pt_regs *regs) { @@ -339,9 +339,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr, isb(); } -static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) +static bool is_el1_mte_sync_tag_check_fault(unsigned long esr) { - unsigned int fsc = esr & ESR_ELx_FSC; + unsigned long fsc = esr & ESR_ELx_FSC; if (!is_el1_data_abort(esr)) return false; @@ -352,7 +352,7 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) return false; } -static void __do_kernel_fault(unsigned long addr, unsigned int esr, +static void __do_kernel_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { const char *msg; @@ -393,7 +393,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, die_kernel_fault(msg, addr, esr, regs); } -static void set_thread_esr(unsigned long address, unsigned int esr) +static void set_thread_esr(unsigned long address, unsigned long esr) { current->thread.fault_address = address; @@ -441,7 +441,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) * exception level). 
Fail safe by not providing an ESR * context record at all. */ - WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr); + WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr); esr = 0; break; } @@ -450,7 +450,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) current->thread.fault_code = esr; } -static void do_bad_area(unsigned long far, unsigned int esr, +static void do_bad_area(unsigned long far, unsigned long esr, struct pt_regs *regs) { unsigned long addr = untagged_addr(far); @@ -501,7 +501,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, return handle_mm_fault(vma, addr, mm_flags, regs); } -static bool is_el0_instruction_abort(unsigned int esr) +static bool is_el0_instruction_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; } @@ -510,12 +510,12 @@ static bool is_el0_instruction_abort(unsigned int esr) * Note: not valid for EL1 DC IVAC, but we never use that such that it * should fault. EL0 cannot issue DC IVAC (undef). */ -static bool is_write_abort(unsigned int esr) +static bool is_write_abort(unsigned long esr) { return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); } -static int __kprobes do_page_fault(unsigned long far, unsigned int esr, +static int __kprobes do_page_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -671,7 +671,7 @@ no_context: } static int __kprobes do_translation_fault(unsigned long far, - unsigned int esr, + unsigned long esr, struct pt_regs *regs) { unsigned long addr = untagged_addr(far); @@ -683,19 +683,19 @@ static int __kprobes do_translation_fault(unsigned long far, return 0; } -static int do_alignment_fault(unsigned long far, unsigned int esr, +static int do_alignment_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { do_bad_area(far, esr, regs); return 0; } -static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs) +static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) { return 1; /* "fault" */ } -static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) +static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; unsigned long siaddr; @@ -725,7 +725,7 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) return 0; } -static int do_tag_check_fault(unsigned long far, unsigned int esr, +static int do_tag_check_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { /* @@ -805,7 +805,7 @@ static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, }; -void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) +void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_fault_info(esr); unsigned long addr = untagged_addr(far); @@ -825,14 +825,14 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) } NOKPROBE_SYMBOL(do_mem_abort); -void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) +void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs) { arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, addr, esr); } NOKPROBE_SYMBOL(do_sp_pc_abort); -int __init early_brk64(unsigned long addr, unsigned int esr, +int __init early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs); /* @@ -852,7 +852,7 @@ static struct fault_info __refdata debug_fault_info[] 
= { }; void __init hook_debug_fault_code(int nr, - int (*fn)(unsigned long, unsigned int, struct pt_regs *), + int (*fn)(unsigned long, unsigned long, struct pt_regs *), int sig, int code, const char *name) { BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info)); @@ -885,7 +885,7 @@ static void debug_exception_exit(struct pt_regs *regs) } NOKPROBE_SYMBOL(debug_exception_exit); -void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, +void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_debug_fault_info(esr); From 0b12620fddb8a8087091df1a9c7b1da1dec7a4a0 Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Mon, 25 Apr 2022 12:44:43 +0100 Subject: [PATCH 067/145] KVM: arm64: Treat ESR_EL2 as a 64-bit register ESR_EL2 was defined as a 32-bit register in the initial release of the ARM Architecture Manual for Armv8-A, and was later extended to 64 bits, with bits [63:32] RES0. ARMv8.7 introduced FEAT_LS64, which makes use of bits [36:32]. KVM treats ESR_EL1 as a 64-bit register when saving and restoring the guest context, but ESR_EL2 is handled as a 32-bit register. Start treating ESR_EL2 as a 64-bit register to allow KVM to make use of the most significant 32 bits in the future. The type chosen to represent ESR_EL2 is u64, as that is consistent with the notation KVM overwhelmingly uses today (u32), and how the rest of the registers are declared. Signed-off-by: Alexandru Elisei Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20220425114444.368693-5-alexandru.elisei@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kvm_emulate.h | 6 +++--- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/include/asm/kvm_ras.h | 2 +- arch/arm64/kvm/handle_exit.c | 14 +++++++------- arch/arm64/kvm/hyp/include/hyp/switch.h | 2 +- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 2 +- arch/arm64/kvm/hyp/vgic-v3-sr.c | 4 ++-- arch/arm64/kvm/inject_fault.c | 4 ++-- arch/arm64/kvm/sys_regs.c | 4 ++-- 9 files changed, 20 insertions(+), 20 deletions(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 7496deab025a..ab19a7317e12 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -235,14 +235,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) return mode != PSR_MODE_EL0t; } -static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu) +static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu) { return vcpu->arch.fault.esr_el2; } static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) { - u32 esr = kvm_vcpu_get_esr(vcpu); + u64 esr = kvm_vcpu_get_esr(vcpu); if (esr & ESR_ELx_CV) return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT; @@ -373,7 +373,7 @@ static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu) static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) { - u32 esr = kvm_vcpu_get_esr(vcpu); + u64 esr = kvm_vcpu_get_esr(vcpu); return ESR_ELx_SYS64_ISS_RT(esr); } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 94a27a7520f4..850430d15cd0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -153,7 +153,7 @@ struct kvm_arch { }; struct kvm_vcpu_fault_info { - u32 esr_el2; /* Hyp Syndrom Register */ + u64 esr_el2; /* Hyp Syndrom Register */ u64 far_el2; /* Hyp Fault Address Register */ u64 hpfar_el2; /* Hyp IPA Fault Address Register */ 
u64 disr_el1; /* Deferred [SError] Status Register */ diff --git a/arch/arm64/include/asm/kvm_ras.h b/arch/arm64/include/asm/kvm_ras.h index 8ac6ee77437c..87e10d9a635b 100644 --- a/arch/arm64/include/asm/kvm_ras.h +++ b/arch/arm64/include/asm/kvm_ras.h @@ -14,7 +14,7 @@ * Was this synchronous external abort a RAS notification? * Returns '0' for errors handled by some RAS subsystem, or -ENOENT. */ -static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr) +static inline int kvm_handle_guest_sea(phys_addr_t addr, u64 esr) { /* apei_claim_sea(NULL) expects to mask interrupts itself */ lockdep_assert_irqs_enabled(); diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 97fe14aab1a3..93d92130d36c 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -26,7 +26,7 @@ typedef int (*exit_handle_fn)(struct kvm_vcpu *); -static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr) +static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr) { if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr)) kvm_inject_vabt(vcpu); @@ -117,10 +117,10 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu) static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - u32 esr = kvm_vcpu_get_esr(vcpu); + u64 esr = kvm_vcpu_get_esr(vcpu); run->exit_reason = KVM_EXIT_DEBUG; - run->debug.arch.hsr = esr; + run->debug.arch.hsr = lower_32_bits(esr); if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW) run->debug.arch.far = vcpu->arch.fault.far_el2; @@ -130,9 +130,9 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu) static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu) { - u32 esr = kvm_vcpu_get_esr(vcpu); + u64 esr = kvm_vcpu_get_esr(vcpu); - kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n", + kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n", esr, esr_get_class_string(esr)); kvm_inject_undefined(vcpu); @@ -187,7 +187,7 @@ static exit_handle_fn arm_exit_handlers[] = { static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) { - u32 esr = kvm_vcpu_get_esr(vcpu); + u64 esr = kvm_vcpu_get_esr(vcpu); u8 esr_ec = ESR_ELx_EC(esr); return arm_exit_handlers[esr_ec]; @@ -334,6 +334,6 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, */ kvm_err("Hyp Offset: 0x%llx\n", hyp_offset); - panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n", + panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n", spsr, elr_virt, esr, far, hpfar, par, vcpu); } diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 5d31f6c64c8c..37d9f211c200 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -266,7 +266,7 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu) return true; } -static inline bool esr_is_ptrauth_trap(u32 esr) +static inline bool esr_is_ptrauth_trap(u64 esr) { switch (esr_sys64_to_sysreg(esr)) { case SYS_APIAKEYLO_EL1: diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 33f5181af330..619f94fc95fa 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -33,7 +33,7 @@ u64 id_aa64mmfr2_el1_sys_val; */ static void inject_undef64(struct kvm_vcpu *vcpu) { - u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); + u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); *vcpu_pc(vcpu) = 
read_sysreg_el2(SYS_ELR); *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR); diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 4fb419f7b8b6..6cb638b184b1 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -473,7 +473,7 @@ static int __vgic_v3_bpr_min(void) static int __vgic_v3_get_group(struct kvm_vcpu *vcpu) { - u32 esr = kvm_vcpu_get_esr(vcpu); + u64 esr = kvm_vcpu_get_esr(vcpu); u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT; return crm != 8; @@ -1016,7 +1016,7 @@ static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt) int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu) { int rt; - u32 esr; + u64 esr; u32 vmcr; void (*fn)(struct kvm_vcpu *, u32, int); bool is_read; diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index b47df73e98d7..3664e30f5694 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -18,7 +18,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr { unsigned long cpsr = *vcpu_cpsr(vcpu); bool is_aarch32 = vcpu_mode_is_32bit(vcpu); - u32 esr = 0; + u64 esr = 0; vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | KVM_ARM64_EXCEPT_AA64_ELx_SYNC | @@ -50,7 +50,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr static void inject_undef64(struct kvm_vcpu *vcpu) { - u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); + u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | KVM_ARM64_EXCEPT_AA64_ELx_SYNC | diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 7b45c040cc27..2bde95662bbf 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2304,7 +2304,7 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, size_t nr_global) { struct sys_reg_params params; - u32 esr = kvm_vcpu_get_esr(vcpu); + u64 esr = kvm_vcpu_get_esr(vcpu); int Rt = kvm_vcpu_sys_get_rt(vcpu); int Rt2 = (esr >> 10) & 0x1f; @@ -2354,7 +2354,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, size_t nr_global) { struct sys_reg_params params; - u32 esr = kvm_vcpu_get_esr(vcpu); + u64 esr = kvm_vcpu_get_esr(vcpu); int Rt = kvm_vcpu_sys_get_rt(vcpu); params.CRm = (esr >> 1) & 0xf; From 18f3976fdb5da2ba9572845e6f7dfb58652871ea Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Mon, 25 Apr 2022 12:44:44 +0100 Subject: [PATCH 068/145] KVM: arm64: uapi: Add kvm_debug_exit_arch.hsr_high When userspace is debugging a VM, the kvm_debug_exit_arch part of the kvm_run struct contains arm64 specific debug information: the ESR_EL2 value, encoded in the field "hsr", and the address of the instruction that caused the exception, encoded in the field "far". Linux has moved to treating ESR_EL2 as a 64-bit register, but unfortunately kvm_debug_exit_arch.hsr cannot be changed because that would change the memory layout of the struct on big endian machines: Current layout: | Layout with "hsr" extended to 64 bits: | offset 0: ESR_EL2[31:0] (hsr) | offset 0: ESR_EL2[61:32] (hsr[61:32]) offset 4: padding | offset 4: ESR_EL2[31:0] (hsr[31:0]) offset 8: FAR_EL2[61:0] (far) | offset 8: FAR_EL2[61:0] (far) which breaks existing code. 
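To make the alignment discussion below concrete, here is a minimal userspace sketch of the layouts involved (the struct and field names below are invented for the illustration; only the field types mirror the __u32/__u64 members of the UAPI struct):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	/* current layout: "far" must be 8-byte aligned, so the compiler
	 * inserts 4 bytes of padding after the 4-byte "hsr" */
	struct dbg_exit_current {
		uint32_t hsr;
		uint64_t far;
	};

	/* the same layout with the padding named as an explicit 32-bit
	 * field: every offset and the total size stay unchanged */
	struct dbg_exit_named_padding {
		uint32_t hsr;
		uint32_t pad32;		/* becomes "hsr_high" below */
		uint64_t far;
	};

	int main(void)
	{
		/* prints: current: hsr@0 far@8 size=16 */
		printf("current: hsr@%zu far@%zu size=%zu\n",
		       offsetof(struct dbg_exit_current, hsr),
		       offsetof(struct dbg_exit_current, far),
		       sizeof(struct dbg_exit_current));
		/* prints: named:   hsr@0 pad@4 far@8 size=16 */
		printf("named:   hsr@%zu pad@%zu far@%zu size=%zu\n",
		       offsetof(struct dbg_exit_named_padding, hsr),
		       offsetof(struct dbg_exit_named_padding, pad32),
		       offsetof(struct dbg_exit_named_padding, far),
		       sizeof(struct dbg_exit_named_padding));
		return 0;
	}

That is, a 32-bit field placed where the padding currently sits does not move "far" or grow the struct, which is what the change described below relies on.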
The padding is inserted by the compiler because the "far" field must be aligned to 8 bytes (each field must be naturally aligned - aapcs64 [1], page 18), and the struct itself must be aligned to 8 bytes (the struct must be aligned to the maximum alignment of its fields - aapcs64, page 18), which means that "hsr" must be aligned to 8 bytes as it is the first field in the struct. To avoid changing the struct size and layout for the existing fields, add a new field, "hsr_high", which replaces the existing padding. "hsr_high" will be used to hold the ESR_EL2[61:32] bits of the register. The memory layout, both on big and little endian machines, becomes: offset 0: ESR_EL2[31:0] (hsr) offset 4: ESR_EL2[61:32] (hsr_high) offset 8: FAR_EL2[61:0] (far) The padding that the compiler inserts for the current struct layout is uninitialized. To prevent an updated userspace running on an old kernel mistaking the padding for a valid "hsr_high" value, add a new flag, KVM_DEBUG_ARCH_HSR_HIGH_VALID, to kvm_run->flags to let userspace know that "hsr_high" holds a valid ESR_EL2[61:32] value. [1] https://github.com/ARM-software/abi-aa/releases/download/2021Q3/aapcs64.pdf Signed-off-by: Alexandru Elisei Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20220425114444.368693-6-alexandru.elisei@arm.com Signed-off-by: Catalin Marinas --- Documentation/virt/kvm/api.rst | 2 ++ arch/arm64/include/uapi/asm/kvm.h | 2 ++ arch/arm64/kvm/arm.c | 1 + arch/arm64/kvm/handle_exit.c | 2 ++ 4 files changed, 7 insertions(+) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 85c7abc51af5..ecd70d99f3e0 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -5713,6 +5713,8 @@ affect the device's behavior. Current defined flags:: #define KVM_RUN_X86_SMM (1 << 0) /* x86, set if bus lock detected in VM */ #define KVM_RUN_BUS_LOCK (1 << 1) + /* arm64, set for KVM_EXIT_DEBUG */ + #define KVM_DEBUG_ARCH_HSR_HIGH_VALID (1 << 0) :: diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index c1b6ddc02d2f..ab585359242d 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -139,8 +139,10 @@ struct kvm_guest_debug_arch { __u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS]; }; +#define KVM_DEBUG_ARCH_HSR_HIGH_VALID (1 << 0) struct kvm_debug_exit_arch { __u32 hsr; + __u32 hsr_high; /* ESR_EL2[61:32] */ __u64 far; /* used for watchpoints */ }; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 523bc934fe2f..7ef4fd2fe20a 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -783,6 +783,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ret = 1; run->exit_reason = KVM_EXIT_UNKNOWN; + run->flags = 0; while (ret > 0) { /* * Check conditions before entering the guest diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 93d92130d36c..0b829292dc54 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -121,6 +121,8 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu) run->exit_reason = KVM_EXIT_DEBUG; run->debug.arch.hsr = lower_32_bits(esr); + run->debug.arch.hsr_high = upper_32_bits(esr); + run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID; if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW) run->debug.arch.far = vcpu->arch.fault.far_el2; From ae60e0763e97e977b03af1ac6ba782a4a86c3a5a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 4 May 2022 00:16:55 +0100 Subject: [PATCH 069/145] kselftest/arm64: Fix ABI header directory location Currently the arm64 kselftests
attempt to locate the ABI headers using custom logic which doesn't work correctly in the case of out of tree builds if KBUILD_OUTPUT is not specified. Since lib.mk defines KHDR_INCLUDES with the appropriate flags we can simply remove the custom logic and use that instead. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220503231655.211346-1-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/Makefile | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile index 1e8d9a8f59df..9460cbe81bcc 100644 --- a/tools/testing/selftests/arm64/Makefile +++ b/tools/testing/selftests/arm64/Makefile @@ -17,16 +17,7 @@ top_srcdir = $(realpath ../../../../) # Additional include paths needed by kselftest.h and local headers CFLAGS += -I$(top_srcdir)/tools/testing/selftests/ -# Guessing where the Kernel headers could have been installed -# depending on ENV config -ifeq ($(KBUILD_OUTPUT),) -khdr_dir = $(top_srcdir)/usr/include -else -# the KSFT preferred location when KBUILD_OUTPUT is set -khdr_dir = $(KBUILD_OUTPUT)/kselftest/usr/include -endif - -CFLAGS += -I$(khdr_dir) +CFLAGS += $(KHDR_INCLUDES) export CFLAGS export top_srcdir From 48e6f22e25a44e43952db5fbb767dea0c9319cb2 Mon Sep 17 00:00:00 2001 From: Michal Orzel Date: Tue, 26 Apr 2022 09:06:03 +0200 Subject: [PATCH 070/145] arm64: cputype: Avoid overflow using MIDR_IMPLEMENTOR_MASK Value of macro MIDR_IMPLEMENTOR_MASK exceeds the range of integer and can lead to overflow. Currently there is no issue as it is used in expressions implicitly casting it to u32. To avoid possible problems, fix the macro. Signed-off-by: Michal Orzel Link: https://lore.kernel.org/r/20220426070603.56031-1-michal.orzel@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cputype.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index ff8f4511df71..92331c07c2d1 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -36,7 +36,7 @@ #define MIDR_VARIANT(midr) \ (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT) #define MIDR_IMPLEMENTOR_SHIFT 24 -#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT) +#define MIDR_IMPLEMENTOR_MASK (0xffU << MIDR_IMPLEMENTOR_SHIFT) #define MIDR_IMPLEMENTOR(midr) \ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) From e6a6b34f97efe3ded077b31f4370b4c1206c9e56 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:22 +0100 Subject: [PATCH 071/145] arm64/sysreg: Introduce helpers for access to sysreg fields The macros we define for the bitfields within sysregs have very regular names, especially once we switch to automatic generation of those macros. 
Take advantage of this to define wrappers around FIELD_PREP() allowing us to simplify setting values in fields either numerically SYS_FIELD_PREP(SCTLR_EL1, TCF0, 0x0) or using the values of enumerations within the fields SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM) Suggested-by: Mark Rutland Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220503170233.507788-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index fbf5f8bb9055..8543a315c5ca 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1345,4 +1345,10 @@ #endif +#define SYS_FIELD_PREP(reg, field, val) \ + FIELD_PREP(reg##_##field##_MASK, val) + +#define SYS_FIELD_PREP_ENUM(reg, field, val) \ + FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val) + #endif /* __ASM_SYSREG_H */ From 96f101a9eab479dbfbdf7713ba966f9031c9c045 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:23 +0100 Subject: [PATCH 072/145] arm64/mte: Make TCF0 naming and field values more standard In preparation for automatic generation of SCTLR_EL1 register definitions make the macros used to define SCTLR_EL1.TCF0 and the enumeration values it has more standard so they can be used with FIELD_PREP() via the newly defined SYS_FIELD_PREP_ helpers. Since the field also exists in SCTLR_EL2 with the same values also rename the macros to SCTLR_ELx rather than SCTLR_EL1. There should be no functional change as a result of this patch. Signed-off-by: Mark Brown Acked-by: Mark Rutland --- arch/arm64/include/asm/sysreg.h | 8 ++++---- arch/arm64/kernel/mte.c | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 8543a315c5ca..6dc840be0268 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -678,10 +678,10 @@ #define SCTLR_EL1_ATA0 (BIT(42)) #define SCTLR_EL1_TCF0_SHIFT 38 -#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT) -#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT) -#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT) -#define SCTLR_EL1_TCF0_ASYMM (UL(0x3) << SCTLR_EL1_TCF0_SHIFT) +#define SCTLR_EL1_TCF0_NONE (UL(0x0)) +#define SCTLR_EL1_TCF0_SYNC (UL(0x1)) +#define SCTLR_EL1_TCF0_ASYNC (UL(0x2)) +#define SCTLR_EL1_TCF0_ASYMM (UL(0x3)) #define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT) #define SCTLR_EL1_BT1 (BIT(36)) diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 78b3e0f8e997..41469b69a48e 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -216,11 +216,11 @@ static void mte_update_sctlr_user(struct task_struct *task) * default order. 
*/ if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM) - sctlr |= SCTLR_EL1_TCF0_ASYMM; + sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM); else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC) - sctlr |= SCTLR_EL1_TCF0_ASYNC; + sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC); else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC) - sctlr |= SCTLR_EL1_TCF0_SYNC; + sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC); task->thread.sctlr_user = sctlr; } From bc249e37b9334826de29111c5350c3e7a08a3969 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:24 +0100 Subject: [PATCH 073/145] arm64/mte: Make TCF field values and naming more standard In preparation for automatic generation of the defines for system registers make the values used for the enumeration in SCTLR_ELx.TCF suitable for use with the newly defined SYS_FIELD_PREP_ENUM helper, removing the shift from the define and using the helper to generate it on use instead. Since we only ever interact with this field in EL1 and in preparation for generation of the defines also rename from SCTLR_ELx to SCTLR_EL1. SCTLR_EL2 is not quite the same as SCTLR_EL1 so the conversion does not share the field definitions. There should be no functional change from this patch. Signed-off-by: Mark Brown Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220503170233.507788-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 14 +++++++------- arch/arm64/kernel/mte.c | 9 +++++---- arch/arm64/mm/fault.c | 3 ++- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6dc840be0268..732d84111d9f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -631,13 +631,6 @@ #define SCTLR_ELx_DSSBS (BIT(44)) #define SCTLR_ELx_ATA (BIT(43)) -#define SCTLR_ELx_TCF_SHIFT 40 -#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_ASYMM (UL(0x3) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT) - #define SCTLR_ELx_ENIA_SHIFT 31 #define SCTLR_ELx_ITFSB (BIT(37)) @@ -677,6 +670,13 @@ #define SCTLR_EL1_EPAN (BIT(57)) #define SCTLR_EL1_ATA0 (BIT(42)) +#define SCTLR_EL1_TCF_SHIFT 40 +#define SCTLR_EL1_TCF_NONE (UL(0x0)) +#define SCTLR_EL1_TCF_SYNC (UL(0x1)) +#define SCTLR_EL1_TCF_ASYNC (UL(0x2)) +#define SCTLR_EL1_TCF_ASYMM (UL(0x3)) +#define SCTLR_EL1_TCF_MASK (UL(0x3) << SCTLR_EL1_TCF_SHIFT) + #define SCTLR_EL1_TCF0_SHIFT 38 #define SCTLR_EL1_TCF0_NONE (UL(0x0)) #define SCTLR_EL1_TCF0_SYNC (UL(0x1)) diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 41469b69a48e..98f5e1e13c36 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -106,7 +106,8 @@ int memcmp_pages(struct page *page1, struct page *page2) static inline void __mte_enable_kernel(const char *mode, unsigned long tcf) { /* Enable MTE Sync Mode for EL1. 
*/ - sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf); + sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK, + SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf)); isb(); pr_info_once("MTE: enabled in %s mode at EL1\n", mode); @@ -122,12 +123,12 @@ void mte_enable_kernel_sync(void) WARN_ONCE(system_uses_mte_async_or_asymm_mode(), "MTE async mode enabled system wide!"); - __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC); + __mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC); } void mte_enable_kernel_async(void) { - __mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC); + __mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC); /* * MTE async mode is set system wide by the first PE that @@ -144,7 +145,7 @@ void mte_enable_kernel_async(void) void mte_enable_kernel_asymm(void) { if (cpus_have_cap(ARM64_MTE_ASYMM)) { - __mte_enable_kernel("asymmetric", SCTLR_ELx_TCF_ASYMM); + __mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM); /* * MTE asymm mode behaves as async mode for store diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 77341b160aca..5e280cc566ca 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -335,7 +335,8 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr, * It will be done lazily on the other CPUs when they will hit a * tag fault. */ - sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_NONE); + sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK, + SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE)); isb(); } From e4e6a9d5593c18b637668ac70ea1bfa868b5c210 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:25 +0100 Subject: [PATCH 074/145] arm64/sysreg: Rename SCTLR_EL1_NTWE/TWI to SCTLR_EL1_nTWE/TWI We already use lower case in some defines in sysreg.h, for consistency with the architecture definition do so for SCTLR_EL1.nTWE and SCTLR_EL1.nTWI. No functional change. Signed-off-by: Mark Brown Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220503170233.507788-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 732d84111d9f..7e9de3c87cd9 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -689,8 +689,8 @@ #define SCTLR_EL1_UCI (BIT(26)) #define SCTLR_EL1_E0E (BIT(24)) #define SCTLR_EL1_SPAN (BIT(23)) -#define SCTLR_EL1_NTWE (BIT(18)) -#define SCTLR_EL1_NTWI (BIT(16)) +#define SCTLR_EL1_nTWE (BIT(18)) +#define SCTLR_EL1_nTWI (BIT(16)) #define SCTLR_EL1_UCT (BIT(15)) #define SCTLR_EL1_DZE (BIT(14)) #define SCTLR_EL1_UMA (BIT(9)) @@ -714,7 +714,7 @@ #define INIT_SCTLR_EL1_MMU_ON \ (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \ SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \ - SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ + SCTLR_EL1_nTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | SCTLR_EL1_RES1) /* MAIR_ELx memory attributes (used by Linux) */ From 56eb621b8ab6c1d36e9eb6e75367ec49ba482f1a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:26 +0100 Subject: [PATCH 075/145] arm64/sysreg: Define bits for previously RES1 fields in SCTLR_EL1 In older revisions of the architecture SCTLR_EL1 contained several RES1 fields but in DDI0487H.a these now all have assigned functions. 
In preparation for automatically generating sysreg.h provide explicit definitions for all these bits and use them in the INIT_SCTLR_EL1_ macros where _RES1 was previously used. There should be no functional change. Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220503170233.507788-6-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 53 ++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 7e9de3c87cd9..331e2521a81a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -633,19 +633,24 @@ #define SCTLR_ELx_ENIA_SHIFT 31 -#define SCTLR_ELx_ITFSB (BIT(37)) -#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT)) -#define SCTLR_ELx_ENIB (BIT(30)) -#define SCTLR_ELx_ENDA (BIT(27)) -#define SCTLR_ELx_EE (BIT(25)) -#define SCTLR_ELx_IESB (BIT(21)) -#define SCTLR_ELx_WXN (BIT(19)) -#define SCTLR_ELx_ENDB (BIT(13)) -#define SCTLR_ELx_I (BIT(12)) -#define SCTLR_ELx_SA (BIT(3)) -#define SCTLR_ELx_C (BIT(2)) -#define SCTLR_ELx_A (BIT(1)) -#define SCTLR_ELx_M (BIT(0)) +#define SCTLR_ELx_ITFSB (BIT(37)) +#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT)) +#define SCTLR_ELx_ENIB (BIT(30)) +#define SCTLR_ELx_LSMAOE (BIT(29)) +#define SCTLR_ELx_nTLSMD (BIT(28)) +#define SCTLR_ELx_ENDA (BIT(27)) +#define SCTLR_ELx_EE (BIT(25)) +#define SCTLR_ELx_EIS (BIT(22)) +#define SCTLR_ELx_IESB (BIT(21)) +#define SCTLR_ELx_TSCXT (BIT(20)) +#define SCTLR_ELx_WXN (BIT(19)) +#define SCTLR_ELx_ENDB (BIT(13)) +#define SCTLR_ELx_I (BIT(12)) +#define SCTLR_ELx_EOS (BIT(11)) +#define SCTLR_ELx_SA (BIT(3)) +#define SCTLR_ELx_C (BIT(2)) +#define SCTLR_ELx_A (BIT(1)) +#define SCTLR_ELx_M (BIT(0)) /* SCTLR_EL2 specific flags. 
*/ #define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ @@ -686,22 +691,24 @@ #define SCTLR_EL1_BT1 (BIT(36)) #define SCTLR_EL1_BT0 (BIT(35)) +#define SCTLR_EL1_LSMAOE (BIT(29)) +#define SCTLR_EL1_nTLSMD (BIT(28)) #define SCTLR_EL1_UCI (BIT(26)) #define SCTLR_EL1_E0E (BIT(24)) #define SCTLR_EL1_SPAN (BIT(23)) +#define SCTLR_EL1_EIS (BIT(22)) +#define SCTLR_EL1_TSCXT (BIT(20)) #define SCTLR_EL1_nTWE (BIT(18)) #define SCTLR_EL1_nTWI (BIT(16)) #define SCTLR_EL1_UCT (BIT(15)) #define SCTLR_EL1_DZE (BIT(14)) +#define SCTLR_EL1_EOS (BIT(11)) #define SCTLR_EL1_UMA (BIT(9)) #define SCTLR_EL1_SED (BIT(8)) #define SCTLR_EL1_ITD (BIT(7)) #define SCTLR_EL1_CP15BEN (BIT(5)) #define SCTLR_EL1_SA0 (BIT(4)) -#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ - (BIT(29))) - #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) #else @@ -709,13 +716,17 @@ #endif #define INIT_SCTLR_EL1_MMU_OFF \ - (ENDIAN_SET_EL1 | SCTLR_EL1_RES1) + (ENDIAN_SET_EL1 | SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | \ + SCTLR_EL1_EIS | SCTLR_EL1_TSCXT | SCTLR_EL1_EOS) #define INIT_SCTLR_EL1_MMU_ON \ - (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \ - SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \ - SCTLR_EL1_nTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ - ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | SCTLR_EL1_RES1) + (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | \ + SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I | \ + SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_nTWE | \ + SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ + ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | \ + SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | SCTLR_EL1_EIS | \ + SCTLR_EL1_TSCXT | SCTLR_EL1_EOS) /* MAIR_ELx memory attributes (used by Linux) */ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) From 6329eb543d991a120fba5de1362fa3df7b6b62e1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:27 +0100 Subject: [PATCH 076/145] arm64: Update name of ID_AA64ISAR0_EL1_ATOMIC to reflect ARM The architecture reference manual refers to the field in bits 23:20 of ID_AA64ISAR0_EL1 with the name "atomic" but the kernel defines for this bitfield use the name "atomics". Bring the two into sync to make it easier to cross reference with the specification. 
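As an illustration of how the renamed constant is typically consumed (a sketch, not part of this patch; the helpers are the existing cpufeature.h/sysreg.h ones, and the >= 2 threshold mirrors the LSE atomics checks in cpufeature.c):

	#include <asm/cpufeature.h>
	#include <asm/sysreg.h>

	/* Reads the current CPU's copy of the register; in-kernel users
	 * would normally go through read_sanitised_ftr_reg() instead,
	 * but the field extraction is the same. */
	static bool cpu_has_lse_atomics_sketch(void)
	{
		u64 isar0 = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);

		/* a field value of 2 or more advertises the LSE atomics */
		return cpuid_feature_extract_unsigned_field(isar0,
						ID_AA64ISAR0_ATOMIC_SHIFT) >= 2;
	}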
Signed-off-by: Mark Brown Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220503170233.507788-7-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 +- arch/arm64/kernel/cpufeature.c | 6 +++--- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 331e2521a81a..0bb259ec6ee8 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -749,7 +749,7 @@ #define ID_AA64ISAR0_SM3_SHIFT 36 #define ID_AA64ISAR0_SHA3_SHIFT 32 #define ID_AA64ISAR0_RDM_SHIFT 28 -#define ID_AA64ISAR0_ATOMICS_SHIFT 20 +#define ID_AA64ISAR0_ATOMIC_SHIFT 20 #define ID_AA64ISAR0_CRC32_SHIFT 16 #define ID_AA64ISAR0_SHA2_SHIFT 12 #define ID_AA64ISAR0_SHA1_SHIFT 8 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d72c4b4d389c..18833fe6d148 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -200,7 +200,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMIC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0), @@ -2013,7 +2013,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64ISAR0_EL1, - .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT, + .field_pos = ID_AA64ISAR0_ATOMIC_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 2, @@ -2520,7 +2520,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMIC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3), diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 5ad626527d41..63a114b9b2ed 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -163,7 +163,7 @@ ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA1) | \ ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA2) | \ ARM64_FEATURE_MASK(ID_AA64ISAR0_CRC32) | \ - 
ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMIC) | \ ARM64_FEATURE_MASK(ID_AA64ISAR0_RDM) | \ ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA3) | \ ARM64_FEATURE_MASK(ID_AA64ISAR0_SM3) | \ From 0eda2ec48907f0ec8c283306c98f28d13e43dafd Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:28 +0100 Subject: [PATCH 077/145] arm64/sysreg: Standardise ID_AA64ISAR0_EL1 macro names The macros for accessing fields in ID_AA64ISAR0_EL1 omit the _EL1 from the name of the register. In preparation for converting this register to be automatically generated update the names to include an _EL1, there should be no functional change. Signed-off-by: Mark Brown Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220503170233.507788-8-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/archrandom.h | 2 +- arch/arm64/include/asm/sysreg.h | 32 ++++----- arch/arm64/kernel/cpufeature.c | 70 +++++++++---------- .../arm64/kvm/hyp/include/nvhe/fixed_config.h | 28 ++++---- 4 files changed, 66 insertions(+), 66 deletions(-) diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h index d1bb5e71df25..3a6b6d38c5b8 100644 --- a/arch/arm64/include/asm/archrandom.h +++ b/arch/arm64/include/asm/archrandom.h @@ -142,7 +142,7 @@ static inline bool __init __early_cpu_has_rndr(void) { /* Open code as we run prior to the first call to cpufeature. */ unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1); - return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf; + return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf; } static inline bool __init __must_check diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 0bb259ec6ee8..ae440b1ffb8e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -740,23 +740,23 @@ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) /* id_aa64isar0 */ -#define ID_AA64ISAR0_RNDR_SHIFT 60 -#define ID_AA64ISAR0_TLB_SHIFT 56 -#define ID_AA64ISAR0_TS_SHIFT 52 -#define ID_AA64ISAR0_FHM_SHIFT 48 -#define ID_AA64ISAR0_DP_SHIFT 44 -#define ID_AA64ISAR0_SM4_SHIFT 40 -#define ID_AA64ISAR0_SM3_SHIFT 36 -#define ID_AA64ISAR0_SHA3_SHIFT 32 -#define ID_AA64ISAR0_RDM_SHIFT 28 -#define ID_AA64ISAR0_ATOMIC_SHIFT 20 -#define ID_AA64ISAR0_CRC32_SHIFT 16 -#define ID_AA64ISAR0_SHA2_SHIFT 12 -#define ID_AA64ISAR0_SHA1_SHIFT 8 -#define ID_AA64ISAR0_AES_SHIFT 4 +#define ID_AA64ISAR0_EL1_RNDR_SHIFT 60 +#define ID_AA64ISAR0_EL1_TLB_SHIFT 56 +#define ID_AA64ISAR0_EL1_TS_SHIFT 52 +#define ID_AA64ISAR0_EL1_FHM_SHIFT 48 +#define ID_AA64ISAR0_EL1_DP_SHIFT 44 +#define ID_AA64ISAR0_EL1_SM4_SHIFT 40 +#define ID_AA64ISAR0_EL1_SM3_SHIFT 36 +#define ID_AA64ISAR0_EL1_SHA3_SHIFT 32 +#define ID_AA64ISAR0_EL1_RDM_SHIFT 28 +#define ID_AA64ISAR0_EL1_ATOMIC_SHIFT 20 +#define ID_AA64ISAR0_EL1_CRC32_SHIFT 16 +#define ID_AA64ISAR0_EL1_SHA2_SHIFT 12 +#define ID_AA64ISAR0_EL1_SHA1_SHIFT 8 +#define ID_AA64ISAR0_EL1_AES_SHIFT 4 -#define ID_AA64ISAR0_TLB_RANGE_NI 0x0 -#define ID_AA64ISAR0_TLB_RANGE 0x2 +#define ID_AA64ISAR0_EL1_TLB_RANGE_NI 0x0 +#define ID_AA64ISAR0_EL1_TLB_RANGE 0x2 /* id_aa64isar1 */ #define ID_AA64ISAR1_I8MM_SHIFT 52 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 18833fe6d148..01e7ae167f9f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -191,20 +191,20 @@ static bool __system_matches_cap(unsigned int n); * sync with the documentation of the CPU feature register ABI. 
*/ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMIC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2013,7 +2013,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64ISAR0_EL1, - .field_pos = ID_AA64ISAR0_ATOMIC_SHIFT, + .field_pos = ID_AA64ISAR0_EL1_ATOMIC_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 2, @@ -2195,10 +2195,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64ISAR0_EL1, - .field_pos = ID_AA64ISAR0_TLB_SHIFT, + .field_pos = ID_AA64ISAR0_EL1_TLB_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, - .min_field_value = ID_AA64ISAR0_TLB_RANGE, + .min_field_value = ID_AA64ISAR0_EL1_TLB_RANGE, }, #ifdef CONFIG_ARM64_HW_AFDBM { @@ -2227,7 +2227,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = 
has_cpuid_feature, .sys_reg = SYS_ID_AA64ISAR0_EL1, - .field_pos = ID_AA64ISAR0_CRC32_SHIFT, + .field_pos = ID_AA64ISAR0_EL1_CRC32_SHIFT, .field_width = 4, .min_field_value = 1, }, @@ -2382,7 +2382,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64ISAR0_EL1, - .field_pos = ID_AA64ISAR0_RNDR_SHIFT, + .field_pos = ID_AA64ISAR0_EL1_RNDR_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 1, @@ -2514,22 +2514,22 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { #endif static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMIC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 
FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index 63a114b9b2ed..fd55014b3497 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -159,20 +159,20 @@
  * No restrictions on instructions implemented in AArch64.
  */
 #define PVM_ID_AA64ISAR0_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_AES) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA1) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA2) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_CRC32) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMIC) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_RDM) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA3) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_SM3) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_SM4) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_DP) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_FHM) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_TS) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_TLB) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR0_RNDR) \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_AES) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA1) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA2) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_CRC32) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RDM) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA3) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM3) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM4) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_DP) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_FHM) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TS) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TLB) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RNDR) \
 	)

 #define PVM_ID_AA64ISAR1_ALLOW (\

From 66847e0618d7dcaf34d9626e2b1f699fc05a2fef Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 3 May 2022 18:02:29 +0100
Subject: [PATCH 078/145] arm64: Add sysreg header generation scripting

The arm64 kernel requires some metadata for each system register it may
need to access. Currently we have:

* A SYS_<reg> definition which corresponds to a sys_reg() macro. This is
  used both to look up a sysreg by encoding (e.g. in KVM), and also to
  generate code to access a sysreg where the assembler is unaware of the
  specific sysreg encoding.

  Where assemblers support the S3_<op1>_C<crn>_C<crm>_<op2> syntax for
  system registers, we could use this rather than manually assembling the
  instructions. However, we don't have consistent definitions for these
  and we currently still need to handle toolchains that lack this feature.

* A set of <reg>_<field>_SHIFT and <reg>_<field>_MASK definitions, which
  can be used to extract fields from the register, or to construct a
  register from a set of fields.

  These do not follow the convention used by <linux/bitfield.h>, and the
  masks are not shifted into place, preventing their use in FIELD_PREP()
  and FIELD_GET(). We require the SHIFT definitions for inline assembly
  (and WIDTH definitions would be helpful for UBFX/SBFX), so we cannot
  only define a shifted MASK. Defining a SHIFT, WIDTH, shifted MASK and
  unshifted MASK is tedious and error-prone, and life is much easier when
  they can be relied upon to exist when writing code.

* A set of <reg>_<field>_<value> definitions for each enumerated value a
  field may hold. These are used when identifying the presence of
  features.

Atop of this, other code has to build up metadata at runtime (e.g. the
sets of RES0/RES1 bits in a register).

This patch adds scripting so that we can have an easier-to-manage
canonical representation of this metadata, from which we can generate all
the definitions necessary for various use-cases, e.g.

| #define REG_ID_AA64ISAR0_EL1            S3_0_C0_C6_0
| #define SYS_ID_AA64ISAR0_EL1            sys_reg(3, 0, 0, 6, 0)
| #define SYS_ID_AA64ISAR0_EL1_Op0        3
| #define SYS_ID_AA64ISAR0_EL1_Op1        0
| #define SYS_ID_AA64ISAR0_EL1_CRn        0
| #define SYS_ID_AA64ISAR0_EL1_CRm        6
| #define SYS_ID_AA64ISAR0_EL1_Op2        0
| #define ID_AA64ISAR0_EL1_RNDR           GENMASK(63, 60)
| #define ID_AA64ISAR0_EL1_RNDR_MASK      GENMASK(63, 60)
| #define ID_AA64ISAR0_EL1_RNDR_SHIFT     60
| #define ID_AA64ISAR0_EL1_RNDR_WIDTH     4
| #define ID_AA64ISAR0_EL1_RNDR_NI        UL(0b0000)
| #define ID_AA64ISAR0_EL1_RNDR_IMP       UL(0b0001)

The script requires that all bits in the register be specified and that
there be no overlapping fields. This helps the script spot errors in the
input but means that the few registers which change layout at runtime
depending on things like virtualisation settings will need some manual
handling.

No actual register conversions are done here, but a header for the
register data with some documentation of the format is provided.

For cases where multiple registers share a layout (eg, when identical
controls are provided at multiple ELs) the register fields can be defined
once and referenced from the actual registers; currently we do not
generate actual defines for the individual registers.

At the moment this is only intended to express metadata from the
architecture, and does not handle policy imposed by the kernel, such as
values exposed to userspace or VMs. In future this could be extended to
express such information.

This script was mostly written by Mark Rutland but has been extended by
Mark Brown to improve validation of input and better integrate with the
kernel.
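As a rough usage sketch (not part of this patch; the helper name and its placement are hypothetical), the shifted _MASK and enumeration defines shown above are what allow the <linux/bitfield.h> accessors to be used directly on an ID register value:

| /* Sketch only: FIELD_GET() works here because the generated mask is
|  * already shifted into place, unlike the existing hand-written defines.
|  */
| static bool cpu_has_rndr(void)
| {
| 	u64 isar0 = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
|
| 	return FIELD_GET(ID_AA64ISAR0_EL1_RNDR_MASK, isar0) >=
| 	       ID_AA64ISAR0_EL1_RNDR_IMP;
| }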
Signed-off-by: Mark Rutland Co-Developed-by: Mark Brown Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220503170233.507788-9-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/gen-sysreg.awk | 261 ++++++++++++++++++++++++++++++++ arch/arm64/tools/sysreg | 48 ++++++ 2 files changed, 309 insertions(+) create mode 100755 arch/arm64/tools/gen-sysreg.awk create mode 100644 arch/arm64/tools/sysreg diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk new file mode 100755 index 000000000000..3ffd77cbb499 --- /dev/null +++ b/arch/arm64/tools/gen-sysreg.awk @@ -0,0 +1,261 @@ +#!/bin/awk -f +# SPDX-License-Identifier: GPL-2.0 +# gen-sysreg.awk: arm64 sysreg header generator +# +# Usage: awk -f gen-sysreg.awk sysregs.txt + +# Log an error and terminate +function fatal(msg) { + print "Error at " NR ": " msg > "/dev/stderr" + exit 1 +} + +# Sanity check that the start or end of a block makes sense at this point in +# the file. If not, produce an error and terminate. +# +# @this - the $Block or $EndBlock +# @prev - the only valid block to already be in (value of @block) +# @new - the new value of @block +function change_block(this, prev, new) { + if (block != prev) + fatal("unexpected " this " (inside " block ")") + + block = new +} + +# Sanity check the number of records for a field makes sense. If not, produce +# an error and terminate. +function expect_fields(nf) { + if (NF != nf) + fatal(NF " fields found where " nf " expected") +} + +# Print a CPP macro definition, padded with spaces so that the macro bodies +# line up in a column +function define(name, val) { + printf "%-48s%s\n", "#define " name, val +} + +# Print standard BITMASK/SHIFT/WIDTH CPP definitions for a field +function define_field(reg, field, msb, lsb) { + define(reg "_" field, "GENMASK(" msb ", " lsb ")") + define(reg "_" field "_MASK", "GENMASK(" msb ", " lsb ")") + define(reg "_" field "_SHIFT", lsb) + define(reg "_" field "_WIDTH", msb - lsb + 1) +} + +# Parse a "[:]" string into the global variables @msb and @lsb +function parse_bitdef(reg, field, bitdef, _bits) +{ + if (bitdef ~ /^[0-9]+$/) { + msb = bitdef + lsb = bitdef + } else if (split(bitdef, _bits, ":") == 2) { + msb = _bits[1] + lsb = _bits[2] + } else { + fatal("invalid bit-range definition '" bitdef "'") + } + + + if (msb != next_bit) + fatal(reg "." field " starts at " msb " not " next_bit) + if (63 < msb || msb < 0) + fatal(reg "." field " invalid high bit in '" bitdef "'") + if (63 < lsb || lsb < 0) + fatal(reg "." field " invalid low bit in '" bitdef "'") + if (msb < lsb) + fatal(reg "." field " invalid bit-range '" bitdef "'") + if (low > high) + fatal(reg "." 
field " has invalid range " high "-" low) + + next_bit = lsb - 1 +} + +BEGIN { + print "#ifndef __ASM_SYSREG_DEFS_H" + print "#define __ASM_SYSREG_DEFS_H" + print "" + print "/* Generated file - do not edit */" + + block = "None" +} + +END { + print "#endif /* __ASM_SYSREG_DEFS_H */" +} + +# skip blank lines and comment lines +/^$/ { next } +/^#/ { next } + +/^SysregFields/ { + change_block("SysregFields", "None", "SysregFields") + expect_fields(2) + + reg = $2 + + res0 = "UL(0)" + res1 = "UL(0)" + + print "" + + next_bit = 63 + + next +} + +/^EndSysregFields/ { + if (next_bit > 0) + fatal("Unspecified bits in " reg) + + change_block("EndSysregFields", "SysregFields", "None") + + define(reg "_RES0", "(" res0 ")") + define(reg "_RES1", "(" res1 ")") + print "" + + reg = null + res0 = null + res1 = null + + next +} + +/^Sysreg/ { + change_block("Sysreg", "None", "Sysreg") + expect_fields(7) + + reg = $2 + op0 = $3 + op1 = $4 + crn = $5 + crm = $6 + op2 = $7 + + res0 = "UL(0)" + res1 = "UL(0)" + + define("REG_" reg, "S" op0 "_" op1 "_C" crn "_C" crm "_" op2) + define("SYS_" reg, "sys_reg(" op0 ", " op1 ", " crn ", " crm ", " op2 ")") + + define("SYS_" reg "_Op0", op0) + define("SYS_" reg "_Op1", op1) + define("SYS_" reg "_CRn", crn) + define("SYS_" reg "_CRm", crm) + define("SYS_" reg "_Op2", op2) + + print "" + + next_bit = 63 + + next +} + +/^EndSysreg/ { + if (next_bit > 0) + fatal("Unspecified bits in " reg) + + change_block("EndSysreg", "Sysreg", "None") + + if (res0 != null) + define(reg "_RES0", "(" res0 ")") + if (res1 != null) + define(reg "_RES1", "(" res1 ")") + print "" + + reg = null + op0 = null + op1 = null + crn = null + crm = null + op2 = null + res0 = null + res1 = null + + next +} + +# Currently this is effectivey a comment, in future we may want to emit +# defines for the fields. +/^Fields/ && (block == "Sysreg") { + expect_fields(2) + + if (next_bit != 63) + fatal("Some fields already defined for " reg) + + print "/* See " $2 " */" + print "" + + next_bit = 0 + res0 = null + res1 = null + + next +} + + +/^Res0/ && (block == "Sysreg" || block == "SysregFields") { + expect_fields(2) + parse_bitdef(reg, "RES0", $2) + field = "RES0_" msb "_" lsb + + res0 = res0 " | GENMASK_ULL(" msb ", " lsb ")" + + next +} + +/^Res1/ && (block == "Sysreg" || block == "SysregFields") { + expect_fields(2) + parse_bitdef(reg, "RES1", $2) + field = "RES1_" msb "_" lsb + + res1 = res1 " | GENMASK_ULL(" msb ", " lsb ")" + + next +} + +/^Field/ && (block == "Sysreg" || block == "SysregFields") { + expect_fields(3) + field = $3 + parse_bitdef(reg, field, $2) + + define_field(reg, field, msb, lsb) + print "" + + next +} + +/^Enum/ { + change_block("Enum", "Sysreg", "Enum") + expect_fields(3) + field = $3 + parse_bitdef(reg, field, $2) + + define_field(reg, field, msb, lsb) + + next +} + +/^EndEnum/ { + change_block("EndEnum", "Enum", "Sysreg") + field = null + msb = null + lsb = null + print "" + next +} + +/0b[01]+/ && block = "Enum" { + expect_fields(2) + val = $1 + name = $2 + + define(reg "_" field "_" name, "UL(" val ")") + next +} + +# Any lines not handled by previous rules are unexpected +{ + fatal("unhandled statement") +} diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg new file mode 100644 index 000000000000..8e39c718c1b8 --- /dev/null +++ b/arch/arm64/tools/sysreg @@ -0,0 +1,48 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# System register metadata + +# Each System register is described by a Sysreg block: + +# Sysreg +# +# ... 
+# EndSysreg
+
+# Within a Sysreg block, each field can be described as one of:
+
+# Res0 <msb>[:<lsb>]
+
+# Res1 <msb>[:<lsb>]
+
+# Field <msb>[:<lsb>] <name>
+
+# Enum <msb>[:<lsb>] <name>
+# <enumval> <enumname>
+# ...
+# EndEnum
+
+# Alternatively if multiple registers share the same layout then
+# a SysregFields block can be used to describe the shared layout
+
+# SysregFields <fieldsname>
+# <field>
+# ...
+# EndSysregFields
+
+# and referenced from within the Sysreg:
+
+# Sysreg <name> <op0> <op1> <crn> <crm> <op2>
+# Fields <fieldsname>
+# EndSysreg
+
+# For ID registers we adopt a few conventions for translating the
+# language in the ARM into defines:
+#
+# NI - Not implemented
+# IMP - Implemented
+#
+# In general it is recommended that new enumeration items be named for the
+# feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration
+# item ACCDATA) though it may be more taseful to do something else.
+

From c07d8017bceb82cbe5fedd129d072c59a53f5513 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 3 May 2022 18:02:30 +0100
Subject: [PATCH 079/145] arm64/sysreg: Enable automatic generation of system register definitions

Now that we have a script for generating system register definitions, hook
it up to the build system similarly to cpucaps. Since we don't currently
have any actual register information in the input file this should produce
no change in the built kernel.

For ease of review the register information will be converted in separate
patches.

Signed-off-by: Mark Brown
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20220503170233.507788-10-broonie@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/Kbuild   | 1 +
 arch/arm64/include/asm/sysreg.h | 8 ++++++++
 arch/arm64/tools/Makefile       | 8 +++++++-
 3 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 345fe98605ba..5c8ee5a541d2 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -7,3 +7,4 @@ generic-y += parport.h
 generic-y += user.h

 generated-y += cpucaps.h
+generated-y += sysreg-defs.h
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index ae440b1ffb8e..db07a01776d8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -114,6 +114,14 @@
 #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
 #define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)

+/*
+ * Automatically generated definitions for system registers, the
+ * manual encodings below are in the process of being converted to
+ * come from here. The header relies on the definition of sys_reg()
+ * earlier in this file.
+ */
+#include "asm/sysreg-defs.h"
+
 /*
  * System registers, organised loosely by encoding but grouped together
  * where the architected name contains an index. e.g. ID_MMFR_EL1.
diff --git a/arch/arm64/tools/Makefile b/arch/arm64/tools/Makefile index cf1307188150..07a93ab21a62 100644 --- a/arch/arm64/tools/Makefile +++ b/arch/arm64/tools/Makefile @@ -3,7 +3,7 @@ gen := arch/$(ARCH)/include/generated kapi := $(gen)/asm -kapi-hdrs-y := $(kapi)/cpucaps.h +kapi-hdrs-y := $(kapi)/cpucaps.h $(kapi)/sysreg-defs.h targets += $(addprefix ../../../, $(kapi-hdrs-y)) @@ -14,5 +14,11 @@ kapi: $(kapi-hdrs-y) quiet_cmd_gen_cpucaps = GEN $@ cmd_gen_cpucaps = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@ +quiet_cmd_gen_sysreg = GEN $@ + cmd_gen_sysreg = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@ + $(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE $(call if_changed,gen_cpucaps) + +$(kapi)/sysreg-defs.h: $(src)/gen-sysreg.awk $(src)/sysreg FORCE + $(call if_changed,gen_sysreg) From e33bb6461cd6ca0e0dad8392f0e3ee0179871e7a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:31 +0100 Subject: [PATCH 080/145] arm64/sysreg: Generate definitions for ID_AA64ISAR0_EL1 Remove the manual definitions for ID_AA64ISAR0_EL1 in favour of automatic generation. There should be no functional change. The only notable change is that 27:24 TME is defined rather than RES0 reflecting DDI0487H.a. Signed-off-by: Mark Brown Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220503170233.507788-11-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 20 ---------- arch/arm64/tools/sysreg | 67 +++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 20 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index db07a01776d8..f5e02f27a5c9 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -196,7 +196,6 @@ #define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) #define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) -#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) #define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2) @@ -747,25 +746,6 @@ /* Position the attr at the correct index */ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) -/* id_aa64isar0 */ -#define ID_AA64ISAR0_EL1_RNDR_SHIFT 60 -#define ID_AA64ISAR0_EL1_TLB_SHIFT 56 -#define ID_AA64ISAR0_EL1_TS_SHIFT 52 -#define ID_AA64ISAR0_EL1_FHM_SHIFT 48 -#define ID_AA64ISAR0_EL1_DP_SHIFT 44 -#define ID_AA64ISAR0_EL1_SM4_SHIFT 40 -#define ID_AA64ISAR0_EL1_SM3_SHIFT 36 -#define ID_AA64ISAR0_EL1_SHA3_SHIFT 32 -#define ID_AA64ISAR0_EL1_RDM_SHIFT 28 -#define ID_AA64ISAR0_EL1_ATOMIC_SHIFT 20 -#define ID_AA64ISAR0_EL1_CRC32_SHIFT 16 -#define ID_AA64ISAR0_EL1_SHA2_SHIFT 12 -#define ID_AA64ISAR0_EL1_SHA1_SHIFT 8 -#define ID_AA64ISAR0_EL1_AES_SHIFT 4 - -#define ID_AA64ISAR0_EL1_TLB_RANGE_NI 0x0 -#define ID_AA64ISAR0_EL1_TLB_RANGE 0x2 - /* id_aa64isar1 */ #define ID_AA64ISAR1_I8MM_SHIFT 52 #define ID_AA64ISAR1_DGH_SHIFT 48 diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 8e39c718c1b8..4d8991574437 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -46,3 +46,70 @@ # feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration # item ACCDATA) though it may be more taseful to do something else. 
+Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0 +Enum 63:60 RNDR + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 59:56 TLB + 0b0000 NI + 0b0001 OS + 0b0010 RANGE +EndEnum +Enum 55:52 TS + 0b0000 NI + 0b0001 FLAGM + 0b0010 FLAGM2 +EndEnum +Enum 51:48 FHM + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 47:44 DP + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 43:40 SM4 + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 39:36 SM3 + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 35:32 SHA3 + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 31:28 RDM + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 27:24 TME + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 23:20 ATOMIC + 0b0000 NI + 0b0010 IMP +EndEnum +Enum 19:16 CRC32 + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 15:12 SHA2 + 0b0000 NI + 0b0001 SHA256 + 0b0010 SHA512 +EndEnum +Enum 11:8 SHA1 + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 7:4 AES + 0b0000 NI + 0b0001 AES + 0b0010 PMULL +EndEnum +Res0 3:0 +EndSysreg From 41fde735062d8dbf7ebf27b278ac567eaf8d9255 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:32 +0100 Subject: [PATCH 081/145] arm64/sysreg: Generate definitions for TTBRn_EL1 Automatically generate definitions for accessing the TTBRn_EL1 registers, no functional change. Signed-off-by: Mark Brown Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220503170233.507788-12-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 -- arch/arm64/tools/sysreg | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f5e02f27a5c9..c61bda1db2c8 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -212,8 +212,6 @@ #define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) #define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) -#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) -#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1) #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) #define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 4d8991574437..e77354847a64 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -113,3 +113,17 @@ Enum 7:4 AES EndEnum Res0 3:0 EndSysreg + +SysregFields TTBRx_EL1 +Field 63:48 ASID +Field 47:1 BADDR +Field 0 CnP +EndSysregFields + +Sysreg TTBR0_EL1 3 0 2 0 0 +Fields TTBRx_EL1 +EndSysreg + +Sysreg TTBR1_EL1 3 0 2 0 1 +Fields TTBRx_EL1 +EndSysreg From 5028fbad2d57910e8c776ba1c868da0e4f64978f Mon Sep 17 00:00:00 2001 From: Hector Martin Date: Mon, 2 May 2022 18:14:27 +0900 Subject: [PATCH 082/145] arm64: Set ARCH_NR_GPIO to 2048 for ARCH_APPLE We're already running into the 512 GPIO limit on t600[01] depending on how many SMC GPIOs we allocate, and a 2-die version could double that. Let's make it 2K to be safe for now. Signed-off-by: Hector Martin Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20220502091427.28416-1-marcan@marcan.st Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 57c4c995965f..764433588fdd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2041,6 +2041,18 @@ config STACKPROTECTOR_PER_TASK def_bool y depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_SYSREG +# The GPIO number here must be sorted by descending number. In case of +# a multiplatform kernel, we just want the highest value required by the +# selected platforms. +config ARCH_NR_GPIO + int + default 2048 if ARCH_APPLE + default 0 + help + Maximum number of GPIOs in the system. 
+ + If unsure, leave the default value. + endmenu menu "Boot options" From 7a41a97b65ea7c4e0458b11e7a2c71c6dd3be0c4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 3 May 2022 18:02:33 +0100 Subject: [PATCH 083/145] arm64/sysreg: Generate definitions for SCTLR_EL1 Automatically generate register definitions for SCTLR_EL1. No functional change. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220503170233.507788-13-broonie@kernel.org [catalin.marinas@arm.com: fix the SCTLR_EL1 encoding] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 38 ------------------ arch/arm64/tools/sysreg | 71 +++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 38 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c61bda1db2c8..a6b8b0deadfb 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -203,7 +203,6 @@ #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) #define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) -#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0) #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) #define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2) #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) @@ -677,43 +676,6 @@ (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) /* SCTLR_EL1 specific flags. */ -#define SCTLR_EL1_EPAN (BIT(57)) -#define SCTLR_EL1_ATA0 (BIT(42)) - -#define SCTLR_EL1_TCF_SHIFT 40 -#define SCTLR_EL1_TCF_NONE (UL(0x0)) -#define SCTLR_EL1_TCF_SYNC (UL(0x1)) -#define SCTLR_EL1_TCF_ASYNC (UL(0x2)) -#define SCTLR_EL1_TCF_ASYMM (UL(0x3)) -#define SCTLR_EL1_TCF_MASK (UL(0x3) << SCTLR_EL1_TCF_SHIFT) - -#define SCTLR_EL1_TCF0_SHIFT 38 -#define SCTLR_EL1_TCF0_NONE (UL(0x0)) -#define SCTLR_EL1_TCF0_SYNC (UL(0x1)) -#define SCTLR_EL1_TCF0_ASYNC (UL(0x2)) -#define SCTLR_EL1_TCF0_ASYMM (UL(0x3)) -#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT) - -#define SCTLR_EL1_BT1 (BIT(36)) -#define SCTLR_EL1_BT0 (BIT(35)) -#define SCTLR_EL1_LSMAOE (BIT(29)) -#define SCTLR_EL1_nTLSMD (BIT(28)) -#define SCTLR_EL1_UCI (BIT(26)) -#define SCTLR_EL1_E0E (BIT(24)) -#define SCTLR_EL1_SPAN (BIT(23)) -#define SCTLR_EL1_EIS (BIT(22)) -#define SCTLR_EL1_TSCXT (BIT(20)) -#define SCTLR_EL1_nTWE (BIT(18)) -#define SCTLR_EL1_nTWI (BIT(16)) -#define SCTLR_EL1_UCT (BIT(15)) -#define SCTLR_EL1_DZE (BIT(14)) -#define SCTLR_EL1_EOS (BIT(11)) -#define SCTLR_EL1_UMA (BIT(9)) -#define SCTLR_EL1_SED (BIT(8)) -#define SCTLR_EL1_ITD (BIT(7)) -#define SCTLR_EL1_CP15BEN (BIT(5)) -#define SCTLR_EL1_SA0 (BIT(4)) - #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) #else diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index e77354847a64..c5619629bf9c 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -114,6 +114,77 @@ EndEnum Res0 3:0 EndSysreg +Sysreg SCTLR_EL1 3 0 1 0 0 +Field 63 TIDCP +Field 62 SPINMASK +Field 61 NMI +Field 60 EnTP2 +Res0 59:58 +Field 57 EPAN +Field 56 EnALS +Field 55 EnAS0 +Field 54 EnASR +Field 53 TME +Field 52 TME0 +Field 51 TMT +Field 50 TMT0 +Field 49:46 TWEDEL +Field 45 TWEDEn +Field 44 DSSBS +Field 43 ATA +Field 42 ATA0 +Enum 41:40 TCF + 0b00 NONE + 0b01 SYNC + 0b10 ASYNC + 0b11 ASYMM +EndEnum +Enum 39:38 TCF0 + 0b00 NONE + 0b01 SYNC + 0b10 ASYNC + 0b11 ASYMM +EndEnum +Field 37 ITFSB +Field 36 BT1 +Field 35 BT0 +Res0 34 +Field 33 MSCEn +Field 32 CMOW +Field 31 EnIA +Field 30 EnIB +Field 29 LSMAOE +Field 28 nTLSMD +Field 27 EnDA +Field 26 UCI +Field 25 EE +Field 24 E0E +Field 23 SPAN +Field 22 EIS +Field 21 IESB +Field 20 TSCXT +Field 19 WXN +Field 
18 nTWE +Res0 17 +Field 16 nTWI +Field 15 UCT +Field 14 DZE +Field 13 EnDB +Field 12 I +Field 11 EOS +Field 10 EnRCTX +Field 9 UMA +Field 8 SED +Field 7 ITD +Field 6 nAA +Field 5 CP15BEN +Field 4 SA0 +Field 3 SA +Field 2 C +Field 1 A +Field 0 M +EndSysreg + SysregFields TTBRx_EL1 Field 63:48 ASID Field 47:1 BADDR From 921d161f15d6b090599f6a8c23f131969edbd1fa Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Wed, 20 Apr 2022 03:04:13 +0000 Subject: [PATCH 084/145] arm64: fix types in copy_highpage() In copy_highpage() the `kto` and `kfrom` local variables are pointers to struct page, but these are used to hold arbitrary pointers to kernel memory . Each call to page_address() returns a void pointer to memory associated with the relevant page, and copy_page() expects void pointers to this memory. This inconsistency was introduced in commit 2563776b41c3 ("arm64: mte: Tags-aware copy_{user_,}highpage() implementations") and while this doesn't appear to be harmful in practice it is clearly wrong. Correct this by making `kto` and `kfrom` void pointers. Fixes: 2563776b41c3 ("arm64: mte: Tags-aware copy_{user_,}highpage() implementations") Signed-off-by: Tong Tiangen Acked-by: Mark Rutland Reviewed-by: Kefeng Wang Link: https://lore.kernel.org/r/20220420030418.3189040-3-tongtiangen@huawei.com Signed-off-by: Catalin Marinas --- arch/arm64/mm/copypage.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index b5447e53cd73..0dea80bf6de4 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -16,8 +16,8 @@ void copy_highpage(struct page *to, struct page *from) { - struct page *kto = page_address(to); - struct page *kfrom = page_address(from); + void *kto = page_address(to); + void *kfrom = page_address(from); copy_page(kto, kfrom); From f41ef4c2ee99d255c82d8ac6f720f28116340869 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Mon, 11 Apr 2022 17:24:55 +0800 Subject: [PATCH 085/145] arm64: mm: Cleanup useless parameters in zone_sizes_init() Directly use max_pfn for max and no one use min, kill them. Reviewed-by: Vijay Balakrishna Signed-off-by: Kefeng Wang Link: https://lore.kernel.org/r/20220411092455.1461-4-wangkefeng.wang@huawei.com Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1e7b1550e2fc..fb07e94242bb 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -157,7 +157,7 @@ static phys_addr_t __init max_zone_phys(unsigned int zone_bits) return min(zone_mask, memblock_end_of_DRAM() - 1) + 1; } -static void __init zone_sizes_init(unsigned long min, unsigned long max) +static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES] = {0}; unsigned int __maybe_unused acpi_zone_dma_bits; @@ -176,7 +176,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) if (!arm64_dma_phys_limit) arm64_dma_phys_limit = dma32_phys_limit; #endif - max_zone_pfns[ZONE_NORMAL] = max; + max_zone_pfns[ZONE_NORMAL] = max_pfn; free_area_init(max_zone_pfns); } @@ -374,7 +374,7 @@ void __init bootmem_init(void) * done after the fixed reservations */ sparse_init(); - zone_sizes_init(min, max); + zone_sizes_init(); /* * Reserve the CMA area after arm64_dma_phys_limit was initialised. 
From c7a9dcea8e98377734821d41cd37bb96add9ca1c Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 22 Mar 2022 15:01:47 -0700 Subject: [PATCH 086/145] perf: RISC-V: Remove non-kernel-doc ** comments This will presumably trip up some tools that try to parse the comments as kernel doc when they're not. Reported-by: kernel test robot Fixes: 4905ec2fb7e6 ("RISC-V: Add sscofpmf extension support") Signed-off-by: Palmer Dabbelt -- These recently landed in for-next, but I'm trying to avoid rewriting history as there's a lot in flight right now. Reviewed-by: Atish Patra Acked-by: Randy Dunlap Link: https://lore.kernel.org/r/20220322220147.11407-1-palmer@rivosinc.com Signed-off-by: Will Deacon --- drivers/perf/riscv_pmu_sbi.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index a1317a483512..dca3537a8dcc 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -35,7 +35,7 @@ union sbi_pmu_ctr_info { }; }; -/** +/* * RISC-V doesn't have hetergenous harts yet. This need to be part of * per_cpu in case of harts with different pmu counters */ @@ -477,7 +477,7 @@ static int pmu_sbi_get_ctrinfo(int nctr) static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu) { - /** + /* * No need to check the error because we are disabling all the counters * which may include counters that are not enabled yet. */ @@ -494,7 +494,7 @@ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu) cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0); } -/** +/* * This function starts all the used counters in two step approach. * Any counter that did not overflow can be start in a single step * while the overflowed counters need to be started with updated initialization @@ -563,7 +563,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) /* Overflow status register should only be read after counter are stopped */ overflow = csr_read(CSR_SSCOUNTOVF); - /** + /* * Overflow interrupt pending bit should only be cleared after stopping * all the counters to avoid any race condition. */ From 4b5b7129095b265ba3bae8a6553d22a41e078b96 Mon Sep 17 00:00:00 2001 From: Ren Yu Date: Mon, 25 Apr 2022 18:04:36 +0800 Subject: [PATCH 087/145] perf: check return value of armpmu_request_irq() When the function armpmu_request_irq() failed, goto err Signed-off-by: Ren Yu Link: https://lore.kernel.org/r/20220425100436.4881-1-renyu@nfschina.com Signed-off-by: Will Deacon --- drivers/perf/arm_pmu_acpi.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index f5c7a845cd7b..96ffadd654ff 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -159,7 +159,9 @@ static int arm_pmu_acpi_parse_irqs(void) * them with their PMUs. */ per_cpu(pmu_irqs, cpu) = irq; - armpmu_request_irq(irq, cpu); + err = armpmu_request_irq(irq, cpu); + if (err) + goto out_err; } return 0; From 2b60a22b70fa77db73f91fd895c6509f70b3e276 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 18 Apr 2022 23:57:38 +0100 Subject: [PATCH 088/145] dt-bindings: perf: arm-cmn: Add CMN-650 and CMN-700 If you were to guess from the product names that CMN-650 and CMN-700 are the next two evolutionary steps of Arm's enterprise-level interconnect following on from CMN-600, you'd be pleasantly correct. Add them to the DT binding. 
CC: devicetree@vger.kernel.org Signed-off-by: Robin Murphy Acked-by: Rob Herring Tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/9b4dc0c82c91adff62b6f92eec5f61fb25b9db87.1650320598.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- Documentation/devicetree/bindings/perf/arm,cmn.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/perf/arm,cmn.yaml b/Documentation/devicetree/bindings/perf/arm,cmn.yaml index 2d4219ec7eda..2e51072e794a 100644 --- a/Documentation/devicetree/bindings/perf/arm,cmn.yaml +++ b/Documentation/devicetree/bindings/perf/arm,cmn.yaml @@ -14,6 +14,8 @@ properties: compatible: enum: - arm,cmn-600 + - arm,cmn-650 + - arm,cmn-700 - arm,ci-700 reg: From 8e504d93acb647c0db31ba13ba11b510bbab4174 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 18 Apr 2022 23:57:39 +0100 Subject: [PATCH 089/145] perf/arm-cmn: Add CMN-650 support Add the identifiers and events for CMN-650, which slots into its evolutionary position between CMN-600 and the 700-series products. Imagine CMN-600 made bigger, and with most of the rough edges smoothed off, but that then balanced out by some bonkers PMU functionality for the new HN-P enhancement in CMN-650r2. Most of the CXG events are actually common to newer revisions of CMN-600 too, so they're arguably a little late; oh well. Signed-off-by: Robin Murphy Tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/b0adc5824db53f71a2b561c293e2120390106536.1650320598.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/perf/arm-cmn.c | 222 ++++++++++++++++++++++++++++++++--------- 1 file changed, 176 insertions(+), 46 deletions(-) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 9c1d82be7a2f..cce8516d465c 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -39,7 +39,7 @@ #define CMN_CHILD_NODE_ADDR GENMASK(27, 0) #define CMN_CHILD_NODE_EXTERNAL BIT(31) -#define CMN_MAX_DIMENSION 8 +#define CMN_MAX_DIMENSION 12 #define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION) #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4) @@ -65,7 +65,9 @@ /* For most nodes, this is all there is */ #define CMN_PMU_EVENT_SEL 0x000 -#define CMN_PMU_EVENTn_ID_SHIFT(n) ((n) * 8) + +/* HN-Ps are weird... */ +#define CMN_HNP_PMU_EVENT_SEL 0x008 /* DTMs live in the PMU space of XP registers */ #define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18) @@ -177,9 +179,12 @@ enum cmn_model { - CMN_ANY = -1, CMN600 = 1, - CI700 = 2, + CMN650 = 2, + CI700 = 8, + /* ...and then we can use bitmap tricks for commonality */ + CMN_ANY = -1, + NOT_CMN600 = -2, }; /* CMN-600 r0px shouldn't exist in silicon, thankfully */ @@ -191,6 +196,11 @@ enum cmn_revision { CMN600_R2P0, CMN600_R3P0, CMN600_R3P1, + CMN650_R0P0 = 0, + CMN650_R1P0, + CMN650_R1P1, + CMN650_R2P0, + CMN650_R1P2, CI700_R0P0 = 0, CI700_R1P0, CI700_R2P0, @@ -211,6 +221,7 @@ enum cmn_node_type { CMN_TYPE_RND = 0xd, CMN_TYPE_RNSAM = 0xf, CMN_TYPE_MTSX, + CMN_TYPE_HNP, CMN_TYPE_CXRA = 0x100, CMN_TYPE_CXHA = 0x101, CMN_TYPE_CXLA = 0x102, @@ -307,9 +318,7 @@ struct arm_cmn_nodeid { static int arm_cmn_xyidbits(const struct arm_cmn *cmn) { - int dim = max(cmn->mesh_x, cmn->mesh_y); - - return dim > 4 ? 
3 : 2; + return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2); } static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id) @@ -362,6 +371,7 @@ static struct dentry *arm_cmn_debugfs; static const char *arm_cmn_device_type(u8 type) { switch(type) { + case 0x00: return " |"; case 0x01: return " RN-I |"; case 0x02: return " RN-D |"; case 0x04: return " RN-F_B |"; @@ -371,6 +381,7 @@ static const char *arm_cmn_device_type(u8 type) case 0x08: return " HN-T |"; case 0x09: return " HN-I |"; case 0x0a: return " HN-D |"; + case 0x0b: return " HN-P |"; case 0x0c: return " SN-F |"; case 0x0d: return " SBSX |"; case 0x0e: return " HN-F |"; @@ -383,8 +394,10 @@ static const char *arm_cmn_device_type(u8 type) case 0x15: return "RN-F_D_E|"; case 0x16: return " RN-F_C |"; case 0x17: return "RN-F_C_E|"; + case 0x18: return " RN-F_E |"; + case 0x19: return "RN-F_E_E|"; case 0x1c: return " MTSX |"; - default: return " |"; + default: return " ???? |"; } } @@ -492,7 +505,7 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {} struct arm_cmn_hw_event { struct arm_cmn_node *dn; - u64 dtm_idx[2]; + u64 dtm_idx[4]; unsigned int dtc_idx; u8 dtcs_used; u8 num_dns; @@ -545,8 +558,7 @@ static bool arm_cmn_is_occup_event(enum cmn_model model, enum cmn_node_type type, unsigned int id) { if (type == CMN_TYPE_DVM) - return (model == CMN600 && id == 0x05) || - (model == CI700 && id == 0x0c); + return model == CMN600 ? id == 0x05 : id == 0x0c; return type == CMN_TYPE_HNF && id == 0x0f; } @@ -580,20 +592,25 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev)); struct arm_cmn_event_attr *eattr; + enum cmn_node_type type; + u16 eventid; eattr = container_of(attr, typeof(*eattr), attr.attr); if (!(eattr->model & cmn->model)) return 0; + type = eattr->type; + eventid = eattr->eventid; + /* Watchpoints aren't nodes, so avoid confusion */ - if (eattr->type == CMN_TYPE_WP) + if (type == CMN_TYPE_WP) return attr->mode; /* Hide XP events for unused interfaces/channels */ - if (eattr->type == CMN_TYPE_XP) { - unsigned int intf = (eattr->eventid >> 2) & 7; - unsigned int chan = eattr->eventid >> 5; + if (type == CMN_TYPE_XP) { + unsigned int intf = (eventid >> 2) & 7; + unsigned int chan = eventid >> 5; if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3))) return 0; @@ -607,12 +624,29 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, } /* Revision-specific differences */ - if (cmn->model == CMN600 && cmn->rev < CMN600_R1P2) { - if (eattr->type == CMN_TYPE_HNF && eattr->eventid == 0x1b) - return 0; + if (cmn->model == CMN600) { + if (cmn->rev < CMN600_R1P3) { + if (type == CMN_TYPE_CXRA && eventid > 0x10) + return 0; + } + if (cmn->rev < CMN600_R1P2) { + if (type == CMN_TYPE_HNF && eventid == 0x1b) + return 0; + if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA) + return 0; + } + } else if (cmn->model == CMN650) { + if (cmn->rev < CMN650_R2P0 || cmn->rev == CMN650_R1P2) { + if (type == CMN_TYPE_HNF && eventid > 0x22) + return 0; + if (type == CMN_TYPE_SBSX && eventid == 0x17) + return 0; + if (type == CMN_TYPE_RNI && eventid > 0x10) + return 0; + } } - if (!arm_cmn_node(cmn, eattr->type)) + if (!arm_cmn_node(cmn, type)) return 0; return attr->mode; @@ -626,6 +660,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup) #define CMN_EVENT_HNI(_name, _event) \ CMN_EVENT_ATTR(CMN_ANY, hni_##_name, 
CMN_TYPE_HNI, _event, 0) +#define CMN_EVENT_HNP(_name, _event) \ + CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event, 0) #define __CMN_EVENT_XP(_name, _event) \ CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event, 0) #define CMN_EVENT_SBSX(_model, _name, _event) \ @@ -634,6 +670,10 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event, 0) #define CMN_EVENT_MTSX(_name, _event) \ CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event, 0) +#define CMN_EVENT_CXRA(_model, _name, _event) \ + CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event, 0) +#define CMN_EVENT_CXHA(_name, _event) \ + CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event, 0) #define CMN_EVENT_DVM(_model, _name, _event) \ _CMN_EVENT_DVM(_model, _name, _event, 0) @@ -675,20 +715,20 @@ static struct attribute *arm_cmn_event_attrs[] = { _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_all, 0x05, 0), _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmop, 0x05, 1), _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmsync, 0x05, 2), - CMN_EVENT_DVM(CI700, dvmop_tlbi, 0x01), - CMN_EVENT_DVM(CI700, dvmop_bpi, 0x02), - CMN_EVENT_DVM(CI700, dvmop_pici, 0x03), - CMN_EVENT_DVM(CI700, dvmop_vici, 0x04), - CMN_EVENT_DVM(CI700, dvmsync, 0x05), - CMN_EVENT_DVM(CI700, vmid_filtered, 0x06), - CMN_EVENT_DVM(CI700, rndop_filtered, 0x07), - CMN_EVENT_DVM(CI700, retry, 0x08), - CMN_EVENT_DVM(CI700, txsnp_flitv, 0x09), - CMN_EVENT_DVM(CI700, txsnp_stall, 0x0a), - CMN_EVENT_DVM(CI700, trkfull, 0x0b), - _CMN_EVENT_DVM(CI700, trk_occupancy_all, 0x0c, 0), - _CMN_EVENT_DVM(CI700, trk_occupancy_dvmop, 0x0c, 1), - _CMN_EVENT_DVM(CI700, trk_occupancy_dvmsync, 0x0c, 2), + CMN_EVENT_DVM(NOT_CMN600, dvmop_tlbi, 0x01), + CMN_EVENT_DVM(NOT_CMN600, dvmop_bpi, 0x02), + CMN_EVENT_DVM(NOT_CMN600, dvmop_pici, 0x03), + CMN_EVENT_DVM(NOT_CMN600, dvmop_vici, 0x04), + CMN_EVENT_DVM(NOT_CMN600, dvmsync, 0x05), + CMN_EVENT_DVM(NOT_CMN600, vmid_filtered, 0x06), + CMN_EVENT_DVM(NOT_CMN600, rndop_filtered, 0x07), + CMN_EVENT_DVM(NOT_CMN600, retry, 0x08), + CMN_EVENT_DVM(NOT_CMN600, txsnp_flitv, 0x09), + CMN_EVENT_DVM(NOT_CMN600, txsnp_stall, 0x0a), + CMN_EVENT_DVM(NOT_CMN600, trkfull, 0x0b), + _CMN_EVENT_DVM(NOT_CMN600, trk_occupancy_all, 0x0c, 0), + _CMN_EVENT_DVM(NOT_CMN600, trk_occupancy_dvmop, 0x0c, 1), + _CMN_EVENT_DVM(NOT_CMN600, trk_occupancy_dvmsync, 0x0c, 2), CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01), CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02), @@ -725,9 +765,12 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_HNF(CMN_ANY, stash_snp_sent, 0x1d), CMN_EVENT_HNF(CMN_ANY, stash_data_pull, 0x1e), CMN_EVENT_HNF(CMN_ANY, snp_fwded, 0x1f), - CMN_EVENT_HNF(CI700, atomic_fwd, 0x20), - CMN_EVENT_HNF(CI700, mpam_hardlim, 0x21), - CMN_EVENT_HNF(CI700, mpam_softlim, 0x22), + CMN_EVENT_HNF(NOT_CMN600, atomic_fwd, 0x20), + CMN_EVENT_HNF(NOT_CMN600, mpam_hardlim, 0x21), + CMN_EVENT_HNF(NOT_CMN600, mpam_softlim, 0x22), + CMN_EVENT_HNF(CMN650, snp_sent_cluster, 0x23), + CMN_EVENT_HNF(CMN650, sf_imprecise_evict, 0x24), + CMN_EVENT_HNF(CMN650, sf_evict_shared_line, 0x25), CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20), CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21), @@ -749,6 +792,27 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_HNI(nonpcie_serialization, 0x31), CMN_EVENT_HNI(pcie_serialization, 0x32), + /* + * HN-P events squat on top of the HN-I similarly to DVM events, except + * for being crammed into the same physical node as well. 
And of course + * where would the fun be if the same events were in the same order... + */ + CMN_EVENT_HNP(rrt_wr_occ_cnt_ovfl, 0x01), + CMN_EVENT_HNP(rdt_wr_occ_cnt_ovfl, 0x02), + CMN_EVENT_HNP(wdb_occ_cnt_ovfl, 0x03), + CMN_EVENT_HNP(rrt_wr_alloc, 0x04), + CMN_EVENT_HNP(rdt_wr_alloc, 0x05), + CMN_EVENT_HNP(wdb_alloc, 0x06), + CMN_EVENT_HNP(awvalid_no_awready, 0x07), + CMN_EVENT_HNP(awready_no_awvalid, 0x08), + CMN_EVENT_HNP(wvalid_no_wready, 0x09), + CMN_EVENT_HNP(rrt_rd_occ_cnt_ovfl, 0x11), + CMN_EVENT_HNP(rdt_rd_occ_cnt_ovfl, 0x12), + CMN_EVENT_HNP(rrt_rd_alloc, 0x13), + CMN_EVENT_HNP(rdt_rd_alloc, 0x14), + CMN_EVENT_HNP(arvalid_no_arready, 0x15), + CMN_EVENT_HNP(arready_no_arvalid, 0x16), + CMN_EVENT_XP(txflit_valid, 0x01), CMN_EVENT_XP(txflit_stall, 0x02), CMN_EVENT_XP(partial_dat_flit, 0x03), @@ -768,7 +832,7 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl, 0x14), CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15), CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16), - CMN_EVENT_SBSX(CI700, rdb_occ_cnt_ovfl, 0x17), + CMN_EVENT_SBSX(NOT_CMN600, rdb_occ_cnt_ovfl, 0x17), CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21), CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22), CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready, 0x23), @@ -795,12 +859,12 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_RNID(CMN600, rdb_replay, 0x12), CMN_EVENT_RNID(CMN600, rdb_hybrid, 0x13), CMN_EVENT_RNID(CMN600, rdb_ord, 0x14), - CMN_EVENT_RNID(CI700, padb_occ_ovfl, 0x11), - CMN_EVENT_RNID(CI700, rpdb_occ_ovfl, 0x12), - CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice1, 0x13), - CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice2, 0x14), - CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice3, 0x15), - CMN_EVENT_RNID(CI700, wrt_throttled, 0x16), + CMN_EVENT_RNID(NOT_CMN600, padb_occ_ovfl, 0x11), + CMN_EVENT_RNID(NOT_CMN600, rpdb_occ_ovfl, 0x12), + CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice1, 0x13), + CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice2, 0x14), + CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice3, 0x15), + CMN_EVENT_RNID(NOT_CMN600, wrt_throttled, 0x16), CMN_EVENT_MTSX(tc_lookup, 0x01), CMN_EVENT_MTSX(tc_fill, 0x02), @@ -815,6 +879,42 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_MTSX(tcq_occ_cnt_ovfl, 0x0b), CMN_EVENT_MTSX(tdb_occ_cnt_ovfl, 0x0c), + CMN_EVENT_CXRA(CMN_ANY, rht_occ, 0x01), + CMN_EVENT_CXRA(CMN_ANY, sht_occ, 0x02), + CMN_EVENT_CXRA(CMN_ANY, rdb_occ, 0x03), + CMN_EVENT_CXRA(CMN_ANY, wdb_occ, 0x04), + CMN_EVENT_CXRA(CMN_ANY, ssb_occ, 0x05), + CMN_EVENT_CXRA(CMN_ANY, snp_bcasts, 0x06), + CMN_EVENT_CXRA(CMN_ANY, req_chains, 0x07), + CMN_EVENT_CXRA(CMN_ANY, req_chain_avglen, 0x08), + CMN_EVENT_CXRA(CMN_ANY, chirsp_stalls, 0x09), + CMN_EVENT_CXRA(CMN_ANY, chidat_stalls, 0x0a), + CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link0, 0x0b), + CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link1, 0x0c), + CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link2, 0x0d), + CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link0, 0x0e), + CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link1, 0x0f), + CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link2, 0x10), + CMN_EVENT_CXRA(CMN_ANY, external_chirsp_stalls, 0x11), + CMN_EVENT_CXRA(CMN_ANY, external_chidat_stalls, 0x12), + CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link0, 0x13), + CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link1, 0x14), + CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link2, 0x15), + + CMN_EVENT_CXHA(rddatbyp, 0x21), + CMN_EVENT_CXHA(chirsp_up_stall, 0x22), + CMN_EVENT_CXHA(chidat_up_stall, 0x23), 
+ CMN_EVENT_CXHA(snppcrd_link0_stall, 0x24), + CMN_EVENT_CXHA(snppcrd_link1_stall, 0x25), + CMN_EVENT_CXHA(snppcrd_link2_stall, 0x26), + CMN_EVENT_CXHA(reqtrk_occ, 0x27), + CMN_EVENT_CXHA(rdb_occ, 0x28), + CMN_EVENT_CXHA(rdbyp_occ, 0x29), + CMN_EVENT_CXHA(wdb_occ, 0x2a), + CMN_EVENT_CXHA(snptrk_occ, 0x2b), + CMN_EVENT_CXHA(sdb_occ, 0x2c), + CMN_EVENT_CXHA(snphaz_occ, 0x2d), + NULL }; @@ -1652,6 +1752,16 @@ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_c node->type, node->logid, offset); } +static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type) +{ + switch (type) { + case CMN_TYPE_HNP: + return CMN_TYPE_HNI; + default: + return CMN_TYPE_INVALID; + } +} + static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) { void __iomem *cfg_region; @@ -1692,8 +1802,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg); } - /* Cheeky +1 to help terminate pointer-based iteration later */ - dn = devm_kcalloc(cmn->dev, cmn->num_dns + 1, sizeof(*dn), GFP_KERNEL); + /* + * Some nodes effectively have two separate types, which we'll handle + * by creating one of each internally. For a (very) safe initial upper + * bound, account for double the number of non-XP nodes. + */ + dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps, + sizeof(*dn), GFP_KERNEL); if (!dn) return -ENOMEM; @@ -1802,6 +1917,18 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_RNSAM: case CMN_TYPE_CXLA: break; + /* + * Split "optimised" combination nodes into separate + * types for the different event sets. Offsetting the + * base address lets us handle the second pmu_event_sel + * register via the normal mechanism later. + */ + case CMN_TYPE_HNP: + dn[1] = dn[0]; + dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL; + dn[1].type = arm_cmn_subtype(dn->type); + dn += 2; + break; /* Something has gone horribly wrong */ default: dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type); @@ -1810,9 +1937,10 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) } } - /* Correct for any nodes we skipped */ + /* Correct for any nodes we added or skipped */ cmn->num_dns = dn - cmn->dns; + /* Cheeky +1 to help terminate pointer-based iteration later */ sz = (void *)(dn + 1) - (void *)cmn->dns; dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL); if (dn) @@ -1970,6 +2098,7 @@ static int arm_cmn_remove(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id arm_cmn_of_match[] = { { .compatible = "arm,cmn-600", .data = (void *)CMN600 }, + { .compatible = "arm,cmn-650", .data = (void *)CMN650 }, { .compatible = "arm,ci-700", .data = (void *)CI700 }, {} }; @@ -1979,6 +2108,7 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id arm_cmn_acpi_match[] = { { "ARMHC600", CMN600 }, + { "ARMHC650", CMN650 }, {} }; MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match); From 65adf71398f5af9a591dc1b7eccac123f992d97a Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 18 Apr 2022 23:57:40 +0100 Subject: [PATCH 090/145] perf/arm-cmn: Refactor occupancy filter selector So far, DNs and HN-Fs have each had one event related to occupancy trackers which are filtered by a separate field. CMN-700 raises the stakes by introducing two more sets of HN-F events with corresponding additional filter fields.
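At its core, the rework amounts to each node keeping a small {value, refcount} pair per filter type, so that events can share a filter field as long as they program the same value into it. The standalone sketch below illustrates only that idea; the enum values mirror the patch, but claim_filter and node_filters are simplified stand-ins for illustration, not the driver's actual definitions.

#include <stdio.h>

enum filter_select { SEL_NONE = -1, SEL_OCCUP1ID, SEL_MAX };

struct node_filters {
	struct {
		unsigned char val;
		unsigned char count;
	} occupid[SEL_MAX];
};

/* Returns 0 on success, -1 if the field is already claimed with another value */
int claim_filter(struct node_filters *n, enum filter_select fsel, unsigned char val)
{
	if (fsel == SEL_NONE)
		return 0;
	if (n->occupid[fsel].count && n->occupid[fsel].val != val)
		return -1;
	n->occupid[fsel].val = val;
	n->occupid[fsel].count++;
	return 0;
}

int main(void)
{
	struct node_filters n = { 0 };

	printf("%d\n", claim_filter(&n, SEL_OCCUP1ID, 1));	/* 0: field claimed */
	printf("%d\n", claim_filter(&n, SEL_OCCUP1ID, 1));	/* 0: same value, shared */
	printf("%d\n", claim_filter(&n, SEL_OCCUP1ID, 2));	/* -1: conflicting value */
	return 0;
}

The refcount is what lets the last event using a given value release the field so that a later event may program a different one.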
Prepare for this by refactoring our filter selection and tracking logic to account for multiple filter types coexisting on the same node. This need not affect the uAPI, which can just continue to encode any per-event filter setting in the "occupid" config field, even if it's technically not the most accurate name for some of them. Signed-off-by: Robin Murphy Tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/1aa47ba0455b144c416537f6b0e58dc93b467a00.1650320598.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/perf/arm-cmn.c | 170 ++++++++++++++++++++++++----------------- 1 file changed, 98 insertions(+), 72 deletions(-) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index cce8516d465c..f9788224df54 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -65,6 +65,8 @@ /* For most nodes, this is all there is */ #define CMN_PMU_EVENT_SEL 0x000 +/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */ +#define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32) /* HN-Ps are weird... */ #define CMN_HNP_PMU_EVENT_SEL 0x008 @@ -229,6 +231,12 @@ enum cmn_node_type { CMN_TYPE_WP = 0x7770 }; +enum cmn_filter_select { + SEL_NONE = -1, + SEL_OCCUP1ID, + SEL_MAX +}; + struct arm_cmn_node { void __iomem *pmu_base; u16 id, logid; @@ -238,9 +246,9 @@ struct arm_cmn_node { union { /* DN/HN-F/CXHA */ struct { - u8 occupid_val; - u8 occupid_count; - }; + u8 val : 4; + u8 count : 4; + } occupid[SEL_MAX]; /* XP */ u8 dtc; }; @@ -510,6 +518,7 @@ struct arm_cmn_hw_event { u8 dtcs_used; u8 num_dns; u8 dtm_offset; + enum cmn_filter_select filter_sel; }; #define for_each_hw_dn(hw, dn, i) \ @@ -535,6 +544,7 @@ struct arm_cmn_event_attr { struct device_attribute attr; enum cmn_model model; enum cmn_node_type type; + enum cmn_filter_select fsel; u8 eventid; u8 occupid; }; @@ -545,22 +555,17 @@ struct arm_cmn_format_attr { int config; }; -#define CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid) \ +#define _CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid, _fsel)\ (&((struct arm_cmn_event_attr[]) {{ \ .attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \ .model = _model, \ .type = _type, \ .eventid = _eventid, \ .occupid = _occupid, \ + .fsel = _fsel, \ }})[0].attr.attr) - -static bool arm_cmn_is_occup_event(enum cmn_model model, - enum cmn_node_type type, unsigned int id) -{ - if (type == CMN_TYPE_DVM) - return model == CMN600 ? 
id == 0x05 : id == 0x0c; - return type == CMN_TYPE_HNF && id == 0x0f; -} +#define CMN_EVENT_ATTR(_model, _name, _type, _eventid) \ + _CMN_EVENT_ATTR(_model, _name, _type, _eventid, 0, SEL_NONE) static ssize_t arm_cmn_event_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -577,7 +582,7 @@ static ssize_t arm_cmn_event_show(struct device *dev, "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n", eattr->type, eattr->eventid); - if (arm_cmn_is_occup_event(eattr->model, eattr->type, eattr->eventid)) + if (eattr->fsel > SEL_NONE) return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n", eattr->type, eattr->eventid, eattr->occupid); @@ -652,33 +657,37 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, return attr->mode; } -#define _CMN_EVENT_DVM(_model, _name, _event, _occup) \ - CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup) +#define _CMN_EVENT_DVM(_model, _name, _event, _occup, _fsel) \ + _CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup, _fsel) #define CMN_EVENT_DTC(_name) \ - CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0, 0) -#define _CMN_EVENT_HNF(_model, _name, _event, _occup) \ - CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup) + CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0) +#define _CMN_EVENT_HNF(_model, _name, _event, _occup, _fsel) \ + _CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup, _fsel) #define CMN_EVENT_HNI(_name, _event) \ - CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event, 0) + CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event) #define CMN_EVENT_HNP(_name, _event) \ - CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event, 0) + CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event) #define __CMN_EVENT_XP(_name, _event) \ - CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event, 0) + CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event) #define CMN_EVENT_SBSX(_model, _name, _event) \ - CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event, 0) + CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event) #define CMN_EVENT_RNID(_model, _name, _event) \ - CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event, 0) + CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event) #define CMN_EVENT_MTSX(_name, _event) \ - CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event, 0) + CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event) #define CMN_EVENT_CXRA(_model, _name, _event) \ - CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event, 0) + CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event) #define CMN_EVENT_CXHA(_name, _event) \ - CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event, 0) + CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event) #define CMN_EVENT_DVM(_model, _name, _event) \ - _CMN_EVENT_DVM(_model, _name, _event, 0) + _CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE) +#define CMN_EVENT_DVM_OCC(_model, _name, _event) \ + _CMN_EVENT_DVM(_model, _name##_all, _event, 0, SEL_OCCUP1ID), \ + _CMN_EVENT_DVM(_model, _name##_dvmop, _event, 1, SEL_OCCUP1ID), \ + _CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID) #define CMN_EVENT_HNF(_model, _name, _event) \ - _CMN_EVENT_HNF(_model, _name, _event, 0) + _CMN_EVENT_HNF(_model, _name, _event, 0, SEL_NONE) #define _CMN_EVENT_XP(_name, _event) \ __CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \ __CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \ @@ -712,9 +721,7 @@ static struct attribute 
*arm_cmn_event_attrs[] = { CMN_EVENT_DVM(CMN600, rxreq_dvmsync, 0x02), CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03), CMN_EVENT_DVM(CMN600, rxreq_retried, 0x04), - _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_all, 0x05, 0), - _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmop, 0x05, 1), - _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmsync, 0x05, 2), + CMN_EVENT_DVM_OCC(CMN600, rxreq_trk_occupancy, 0x05), CMN_EVENT_DVM(NOT_CMN600, dvmop_tlbi, 0x01), CMN_EVENT_DVM(NOT_CMN600, dvmop_bpi, 0x02), CMN_EVENT_DVM(NOT_CMN600, dvmop_pici, 0x03), @@ -726,9 +733,7 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_DVM(NOT_CMN600, txsnp_flitv, 0x09), CMN_EVENT_DVM(NOT_CMN600, txsnp_stall, 0x0a), CMN_EVENT_DVM(NOT_CMN600, trkfull, 0x0b), - _CMN_EVENT_DVM(NOT_CMN600, trk_occupancy_all, 0x0c, 0), - _CMN_EVENT_DVM(NOT_CMN600, trk_occupancy_dvmop, 0x0c, 1), - _CMN_EVENT_DVM(NOT_CMN600, trk_occupancy_dvmsync, 0x0c, 2), + CMN_EVENT_DVM_OCC(NOT_CMN600, trk_occupancy, 0x0c), CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01), CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02), @@ -744,11 +749,11 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_HNF(CMN_ANY, mc_retries, 0x0c), CMN_EVENT_HNF(CMN_ANY, mc_reqs, 0x0d), CMN_EVENT_HNF(CMN_ANY, qos_hh_retry, 0x0e), - _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0), - _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1), - _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2), - _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3), - _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0, SEL_OCCUP1ID), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1, SEL_OCCUP1ID), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2, SEL_OCCUP1ID), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3, SEL_OCCUP1ID), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4, SEL_OCCUP1ID), CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz, 0x10), CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11), CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full, 0x12), @@ -817,8 +822,8 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_XP(txflit_stall, 0x02), CMN_EVENT_XP(partial_dat_flit, 0x03), /* We treat watchpoints as a special made-up class of XP events */ - CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP, 0), - CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN, 0), + CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP), + CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN), CMN_EVENT_SBSX(CMN_ANY, rd_req, 0x01), CMN_EVENT_SBSX(CMN_ANY, wr_req, 0x02), @@ -1132,6 +1137,26 @@ static void arm_cmn_event_read(struct perf_event *event) local64_add(delta, &event->count); } +static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn, + enum cmn_filter_select fsel, u8 occupid) +{ + u64 reg; + + if (fsel == SEL_NONE) + return 0; + + if (!dn->occupid[fsel].count) { + dn->occupid[fsel].val = occupid; + reg = FIELD_PREP(CMN__PMU_OCCUP1_ID, + dn->occupid[SEL_OCCUP1ID].val); + writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4); + } else if (dn->occupid[fsel].val != occupid) { + return -EBUSY; + } + dn->occupid[fsel].count++; + return 0; +} + static void arm_cmn_event_start(struct perf_event *event, int flags) { struct arm_cmn *cmn = to_cmn(event->pmu); @@ -1195,7 +1220,7 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) struct arm_cmn_val { u8 dtm_count[CMN_MAX_DTMS]; - u8 occupid[CMN_MAX_DTMS]; 
+ u8 occupid[CMN_MAX_DTMS][SEL_MAX]; u8 wp[CMN_MAX_DTMS][4]; int dtc_count; bool cycles; @@ -1208,7 +1233,6 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val, struct arm_cmn_node *dn; enum cmn_node_type type; int i; - u8 occupid; if (is_software_event(event)) return; @@ -1220,16 +1244,14 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val, } val->dtc_count++; - if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) - occupid = CMN_EVENT_OCCUPID(event) + 1; - else - occupid = 0; for_each_hw_dn(hw, dn, i) { - int wp_idx, dtm = dn->dtm; + int wp_idx, dtm = dn->dtm, sel = hw->filter_sel; val->dtm_count[dtm]++; - val->occupid[dtm] = occupid; + + if (sel > SEL_NONE) + val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1; if (type != CMN_TYPE_WP) continue; @@ -1247,7 +1269,6 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event) enum cmn_node_type type; struct arm_cmn_val *val; int i, ret = -EINVAL; - u8 occupid; if (leader == event) return 0; @@ -1272,18 +1293,14 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event) if (val->dtc_count == CMN_DT_NUM_COUNTERS) goto done; - if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) - occupid = CMN_EVENT_OCCUPID(event) + 1; - else - occupid = 0; - for_each_hw_dn(hw, dn, i) { - int wp_idx, wp_cmb, dtm = dn->dtm; + int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel; if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS) goto done; - if (occupid && val->occupid[dtm] && occupid != val->occupid[dtm]) + if (sel > SEL_NONE && val->occupid[dtm][sel] && + val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1) goto done; if (type != CMN_TYPE_WP) @@ -1304,6 +1321,22 @@ done: return ret; } +static enum cmn_filter_select arm_cmn_filter_sel(enum cmn_model model, + enum cmn_node_type type, + unsigned int eventid) +{ + struct arm_cmn_event_attr *e; + int i; + + for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs); i++) { + e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr); + if (e->model & model && e->type == type && e->eventid == eventid) + return e->fsel; + } + return SEL_NONE; +} + + static int arm_cmn_event_init(struct perf_event *event) { struct arm_cmn *cmn = to_cmn(event->pmu); @@ -1328,11 +1361,11 @@ static int arm_cmn_event_init(struct perf_event *event) if (type == CMN_TYPE_DTC) return 0; + eventid = CMN_EVENT_EVENTID(event); /* For watchpoints we need the actual XP node here */ if (type == CMN_TYPE_WP) { type = CMN_TYPE_XP; /* ...and we need a "real" direction */ - eventid = CMN_EVENT_EVENTID(event); if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN) return -EINVAL; /* ...but the DTM may depend on which port we're watching */ @@ -1340,6 +1373,9 @@ static int arm_cmn_event_init(struct perf_event *event) hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2; } + /* This is sufficiently annoying to recalculate, so cache it */ + hw->filter_sel = arm_cmn_filter_sel(cmn->model, type, eventid); + bynodeid = CMN_EVENT_BYNODEID(event); nodeid = CMN_EVENT_NODEID(event); @@ -1381,8 +1417,8 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, if (type == CMN_TYPE_WP) dtm->wp_event[arm_cmn_wp_idx(event)] = -1; - if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) - hw->dn[i].occupid_count--; + if (hw->filter_sel > SEL_NONE) + hw->dn[i].occupid[hw->filter_sel].count--; dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); writel_relaxed(dtm->pmu_config_low, 
dtm->base + CMN_DTM_PMU_CONFIG); @@ -1462,18 +1498,8 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx + (nid.port << 4) + (nid.dev << 2); - if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) { - u8 occupid = CMN_EVENT_OCCUPID(event); - - if (dn->occupid_count == 0) { - dn->occupid_val = occupid; - writel_relaxed(occupid, - dn->pmu_base + CMN_PMU_EVENT_SEL + 4); - } else if (dn->occupid_val != occupid) { - goto free_dtms; - } - dn->occupid_count++; - } + if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event))) + goto free_dtms; } arm_cmn_set_index(hw->dtm_idx, i, dtm_idx); From 23760a0144173ef398522fbcc1dbe79521b5caf9 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 18 Apr 2022 23:57:41 +0100 Subject: [PATCH 091/145] perf/arm-cmn: Add CMN-700 support Add the identifiers, events, and subtleties for CMN-700. Highlights include yet more options for doubling up CHI channels, which finally grows event IDs beyond 8 bits for XPs, and a new set of CML gateway nodes adding support for CXL as well as CCIX, where the Link Agent is now internal to the CMN mesh so we gain regular PMU events for that too. Signed-off-by: Robin Murphy Tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/cf892baa0d0258ea6cd6544b15171be0069a083a.1650320598.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/perf/arm-cmn.c | 236 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 220 insertions(+), 16 deletions(-) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index f9788224df54..62f3842d1a47 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -52,6 +52,10 @@ #define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52) #define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50) +#define CMN_CFGM_INFO_GLOBAL_1 0x908 +#define CMN_INFO_SNP_VC_NUM GENMASK_ULL(3, 2) +#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0) + /* XPs also have some local topology info which has uses too */ #define CMN_MXP__CONNECT_INFO_P0 0x0008 #define CMN_MXP__CONNECT_INFO_P1 0x0010 @@ -65,6 +69,8 @@ /* For most nodes, this is all there is */ #define CMN_PMU_EVENT_SEL 0x000 +#define CMN__PMU_CBUSY_SNTHROTTLE_SEL GENMASK_ULL(44, 42) +#define CMN__PMU_CLASS_OCCUP_ID GENMASK_ULL(36, 35) /* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */ #define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32) @@ -74,7 +80,8 @@ /* DTMs live in the PMU space of XP registers */ #define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18) #define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00) -#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17) +#define CMN_DTM_WPn_CONFIG_WP_CHN_NUM GENMASK_ULL(20, 19) +#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18, 17) #define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(9) #define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8) #define CMN600_WPn_CONFIG_WP_COMBINE BIT(6) @@ -147,8 +154,8 @@ /* Event attributes */ #define CMN_CONFIG_TYPE GENMASK_ULL(15, 0) -#define CMN_CONFIG_EVENTID GENMASK_ULL(23, 16) -#define CMN_CONFIG_OCCUPID GENMASK_ULL(27, 24) +#define CMN_CONFIG_EVENTID GENMASK_ULL(26, 16) +#define CMN_CONFIG_OCCUPID GENMASK_ULL(30, 27) #define CMN_CONFIG_BYNODEID BIT_ULL(31) #define CMN_CONFIG_NODEID GENMASK_ULL(47, 32) @@ -183,10 +190,12 @@ enum cmn_model { CMN600 = 1, CMN650 = 2, + CMN700 = 4, CI700 = 8, /* ...and then we can use bitmap tricks for commonality */ CMN_ANY = -1, NOT_CMN600 = -2, + CMN_650ON = CMN650 | CMN700, }; /* CMN-600 r0px shouldn't exist in silicon, thankfully */ @@ -203,6 +212,9 @@ 
enum cmn_revision { CMN650_R1P1, CMN650_R2P0, CMN650_R1P2, + CMN700_R0P0 = 0, + CMN700_R1P0, + CMN700_R2P0, CI700_R0P0 = 0, CI700_R1P0, CI700_R2P0, @@ -225,8 +237,12 @@ enum cmn_node_type { CMN_TYPE_MTSX, CMN_TYPE_HNP, CMN_TYPE_CXRA = 0x100, - CMN_TYPE_CXHA = 0x101, - CMN_TYPE_CXLA = 0x102, + CMN_TYPE_CXHA, + CMN_TYPE_CXLA, + CMN_TYPE_CCRA, + CMN_TYPE_CCHA, + CMN_TYPE_CCLA, + CMN_TYPE_CCLA_RNI, /* Not a real node type */ CMN_TYPE_WP = 0x7770 }; @@ -234,6 +250,8 @@ enum cmn_node_type { enum cmn_filter_select { SEL_NONE = -1, SEL_OCCUP1ID, + SEL_CLASS_OCCUP_ID, + SEL_CBUSY_SNTHROTTLE_SEL, SEL_MAX }; @@ -255,6 +273,8 @@ struct arm_cmn_node { union { u8 event[4]; __le32 event_sel; + u16 event_w[4]; + __le64 event_sel_w; }; }; @@ -297,6 +317,8 @@ struct arm_cmn { struct { unsigned int rsp_vc_num : 2; unsigned int dat_vc_num : 2; + unsigned int snp_vc_num : 2; + unsigned int req_vc_num : 2; }; struct arm_cmn_node *xps; @@ -405,6 +427,8 @@ static const char *arm_cmn_device_type(u8 type) case 0x18: return " RN-F_E |"; case 0x19: return "RN-F_E_E|"; case 0x1c: return " MTSX |"; + case 0x1d: return " HN-V |"; + case 0x1e: return " CCG |"; default: return " ???? |"; } } @@ -518,6 +542,7 @@ struct arm_cmn_hw_event { u8 dtcs_used; u8 num_dns; u8 dtm_offset; + bool wide_sel; enum cmn_filter_select filter_sel; }; @@ -545,7 +570,7 @@ struct arm_cmn_event_attr { enum cmn_model model; enum cmn_node_type type; enum cmn_filter_select fsel; - u8 eventid; + u16 eventid; u8 occupid; }; @@ -624,7 +649,9 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, return 0; if ((chan == 5 && cmn->rsp_vc_num < 2) || - (chan == 6 && cmn->dat_vc_num < 2)) + (chan == 6 && cmn->dat_vc_num < 2) || + (chan == 7 && cmn->snp_vc_num < 2) || + (chan == 8 && cmn->req_vc_num < 2)) return 0; } @@ -649,6 +676,19 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, if (type == CMN_TYPE_RNI && eventid > 0x10) return 0; } + } else if (cmn->model == CMN700) { + if (cmn->rev < CMN700_R2P0) { + if (type == CMN_TYPE_HNF && eventid > 0x2c) + return 0; + if (type == CMN_TYPE_CCHA && eventid > 0x74) + return 0; + if (type == CMN_TYPE_CCLA && eventid > 0x27) + return 0; + } + if (cmn->rev < CMN700_R1P0) { + if (type == CMN_TYPE_HNF && eventid > 0x2b) + return 0; + } } if (!arm_cmn_node(cmn, type)) @@ -679,6 +719,14 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event) #define CMN_EVENT_CXHA(_name, _event) \ CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event) +#define CMN_EVENT_CCRA(_name, _event) \ + CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event) +#define CMN_EVENT_CCHA(_name, _event) \ + CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event) +#define CMN_EVENT_CCLA(_name, _event) \ + CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event) +#define CMN_EVENT_CCLA_RNI(_name, _event) \ + CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event) #define CMN_EVENT_DVM(_model, _name, _event) \ _CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE) @@ -688,6 +736,20 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, _CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID) #define CMN_EVENT_HNF(_model, _name, _event) \ _CMN_EVENT_HNF(_model, _name, _event, 0, SEL_NONE) +#define CMN_EVENT_HNF_CLS(_model, _name, _event) \ + _CMN_EVENT_HNF(_model, _name##_class0, _event, 0, SEL_CLASS_OCCUP_ID), \ + _CMN_EVENT_HNF(_model, _name##_class1, _event, 1, SEL_CLASS_OCCUP_ID), \ + 
_CMN_EVENT_HNF(_model, _name##_class2, _event, 2, SEL_CLASS_OCCUP_ID), \ + _CMN_EVENT_HNF(_model, _name##_class3, _event, 3, SEL_CLASS_OCCUP_ID) +#define CMN_EVENT_HNF_SNT(_model, _name, _event) \ + _CMN_EVENT_HNF(_model, _name##_all, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \ + _CMN_EVENT_HNF(_model, _name##_group0_read, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \ + _CMN_EVENT_HNF(_model, _name##_group0_write, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \ + _CMN_EVENT_HNF(_model, _name##_group1_read, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \ + _CMN_EVENT_HNF(_model, _name##_group1_write, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \ + _CMN_EVENT_HNF(_model, _name##_read, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \ + _CMN_EVENT_HNF(_model, _name##_write, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL) + #define _CMN_EVENT_XP(_name, _event) \ __CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \ __CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \ @@ -706,7 +768,9 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)), \ _CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \ _CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \ - _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)) + _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \ + _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \ + _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5)) static struct attribute *arm_cmn_event_attrs[] = { @@ -734,6 +798,14 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_DVM(NOT_CMN600, txsnp_stall, 0x0a), CMN_EVENT_DVM(NOT_CMN600, trkfull, 0x0b), CMN_EVENT_DVM_OCC(NOT_CMN600, trk_occupancy, 0x0c), + CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_cxha, 0x0d), + CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_pdn, 0x0e), + CMN_EVENT_DVM(CMN700, trk_alloc, 0x0f), + CMN_EVENT_DVM(CMN700, trk_cxha_alloc, 0x10), + CMN_EVENT_DVM(CMN700, trk_pdn_alloc, 0x11), + CMN_EVENT_DVM(CMN700, txsnp_stall_limit, 0x12), + CMN_EVENT_DVM(CMN700, rxsnp_stall_starv, 0x13), + CMN_EVENT_DVM(CMN700, txsnp_sync_stall_op, 0x14), CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01), CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02), @@ -773,9 +845,19 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_HNF(NOT_CMN600, atomic_fwd, 0x20), CMN_EVENT_HNF(NOT_CMN600, mpam_hardlim, 0x21), CMN_EVENT_HNF(NOT_CMN600, mpam_softlim, 0x22), - CMN_EVENT_HNF(CMN650, snp_sent_cluster, 0x23), - CMN_EVENT_HNF(CMN650, sf_imprecise_evict, 0x24), - CMN_EVENT_HNF(CMN650, sf_evict_shared_line, 0x25), + CMN_EVENT_HNF(CMN_650ON, snp_sent_cluster, 0x23), + CMN_EVENT_HNF(CMN_650ON, sf_imprecise_evict, 0x24), + CMN_EVENT_HNF(CMN_650ON, sf_evict_shared_line, 0x25), + CMN_EVENT_HNF_CLS(CMN700, pocq_class_occup, 0x26), + CMN_EVENT_HNF_CLS(CMN700, pocq_class_retry, 0x27), + CMN_EVENT_HNF_CLS(CMN700, class_mc_reqs, 0x28), + CMN_EVENT_HNF_CLS(CMN700, class_cgnt_cmin, 0x29), + CMN_EVENT_HNF_SNT(CMN700, sn_throttle, 0x2a), + CMN_EVENT_HNF_SNT(CMN700, sn_throttle_min, 0x2b), + CMN_EVENT_HNF(CMN700, sf_precise_to_imprecise, 0x2c), + CMN_EVENT_HNF(CMN700, snp_intv_cln, 0x2d), + CMN_EVENT_HNF(CMN700, nc_excl, 0x2e), + CMN_EVENT_HNF(CMN700, excl_mon_ovfl, 0x2f), CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20), CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21), @@ -870,6 +952,19 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice2, 0x14), CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice3, 0x15), CMN_EVENT_RNID(NOT_CMN600, wrt_throttled, 0x16), + CMN_EVENT_RNID(CMN700, ldb_full, 0x17), + CMN_EVENT_RNID(CMN700, 
rrt_rd_req_occup_ovfl_slice0, 0x18), + CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice1, 0x19), + CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice2, 0x1a), + CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice3, 0x1b), + CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice0, 0x1c), + CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice1, 0x1d), + CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice2, 0x1e), + CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice3, 0x1f), + CMN_EVENT_RNID(CMN700, rrt_burst_alloc, 0x20), + CMN_EVENT_RNID(CMN700, awid_hash, 0x21), + CMN_EVENT_RNID(CMN700, atomic_alloc, 0x22), + CMN_EVENT_RNID(CMN700, atomic_occ_ovfl, 0x23), CMN_EVENT_MTSX(tc_lookup, 0x01), CMN_EVENT_MTSX(tc_fill, 0x02), @@ -920,6 +1015,82 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_CXHA(sdb_occ, 0x2c), CMN_EVENT_CXHA(snphaz_occ, 0x2d), + CMN_EVENT_CCRA(rht_occ, 0x41), + CMN_EVENT_CCRA(sht_occ, 0x42), + CMN_EVENT_CCRA(rdb_occ, 0x43), + CMN_EVENT_CCRA(wdb_occ, 0x44), + CMN_EVENT_CCRA(ssb_occ, 0x45), + CMN_EVENT_CCRA(snp_bcasts, 0x46), + CMN_EVENT_CCRA(req_chains, 0x47), + CMN_EVENT_CCRA(req_chain_avglen, 0x48), + CMN_EVENT_CCRA(chirsp_stalls, 0x49), + CMN_EVENT_CCRA(chidat_stalls, 0x4a), + CMN_EVENT_CCRA(cxreq_pcrd_stalls_link0, 0x4b), + CMN_EVENT_CCRA(cxreq_pcrd_stalls_link1, 0x4c), + CMN_EVENT_CCRA(cxreq_pcrd_stalls_link2, 0x4d), + CMN_EVENT_CCRA(cxdat_pcrd_stalls_link0, 0x4e), + CMN_EVENT_CCRA(cxdat_pcrd_stalls_link1, 0x4f), + CMN_EVENT_CCRA(cxdat_pcrd_stalls_link2, 0x50), + CMN_EVENT_CCRA(external_chirsp_stalls, 0x51), + CMN_EVENT_CCRA(external_chidat_stalls, 0x52), + CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link0, 0x53), + CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link1, 0x54), + CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link2, 0x55), + CMN_EVENT_CCRA(rht_alloc, 0x56), + CMN_EVENT_CCRA(sht_alloc, 0x57), + CMN_EVENT_CCRA(rdb_alloc, 0x58), + CMN_EVENT_CCRA(wdb_alloc, 0x59), + CMN_EVENT_CCRA(ssb_alloc, 0x5a), + + CMN_EVENT_CCHA(rddatbyp, 0x61), + CMN_EVENT_CCHA(chirsp_up_stall, 0x62), + CMN_EVENT_CCHA(chidat_up_stall, 0x63), + CMN_EVENT_CCHA(snppcrd_link0_stall, 0x64), + CMN_EVENT_CCHA(snppcrd_link1_stall, 0x65), + CMN_EVENT_CCHA(snppcrd_link2_stall, 0x66), + CMN_EVENT_CCHA(reqtrk_occ, 0x67), + CMN_EVENT_CCHA(rdb_occ, 0x68), + CMN_EVENT_CCHA(rdbyp_occ, 0x69), + CMN_EVENT_CCHA(wdb_occ, 0x6a), + CMN_EVENT_CCHA(snptrk_occ, 0x6b), + CMN_EVENT_CCHA(sdb_occ, 0x6c), + CMN_EVENT_CCHA(snphaz_occ, 0x6d), + CMN_EVENT_CCHA(reqtrk_alloc, 0x6e), + CMN_EVENT_CCHA(rdb_alloc, 0x6f), + CMN_EVENT_CCHA(rdbyp_alloc, 0x70), + CMN_EVENT_CCHA(wdb_alloc, 0x71), + CMN_EVENT_CCHA(snptrk_alloc, 0x72), + CMN_EVENT_CCHA(sdb_alloc, 0x73), + CMN_EVENT_CCHA(snphaz_alloc, 0x74), + CMN_EVENT_CCHA(pb_rhu_req_occ, 0x75), + CMN_EVENT_CCHA(pb_rhu_req_alloc, 0x76), + CMN_EVENT_CCHA(pb_rhu_pcie_req_occ, 0x77), + CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc, 0x78), + CMN_EVENT_CCHA(pb_pcie_wr_req_occ, 0x79), + CMN_EVENT_CCHA(pb_pcie_wr_req_alloc, 0x7a), + CMN_EVENT_CCHA(pb_pcie_reg_req_occ, 0x7b), + CMN_EVENT_CCHA(pb_pcie_reg_req_alloc, 0x7c), + CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ, 0x7d), + CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc, 0x7e), + CMN_EVENT_CCHA(pb_rhu_dat_occ, 0x7f), + CMN_EVENT_CCHA(pb_rhu_dat_alloc, 0x80), + CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ, 0x81), + CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc, 0x82), + CMN_EVENT_CCHA(pb_pcie_wr_dat_occ, 0x83), + CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc, 0x84), + + CMN_EVENT_CCLA(rx_cxs, 0x21), + CMN_EVENT_CCLA(tx_cxs, 0x22), + CMN_EVENT_CCLA(rx_cxs_avg_size, 0x23), + CMN_EVENT_CCLA(tx_cxs_avg_size, 0x24), + 
CMN_EVENT_CCLA(tx_cxs_lcrd_backpressure, 0x25), + CMN_EVENT_CCLA(link_crdbuf_occ, 0x26), + CMN_EVENT_CCLA(link_crdbuf_alloc, 0x27), + CMN_EVENT_CCLA(pfwd_rcvr_cxs, 0x28), + CMN_EVENT_CCLA(pfwd_sndr_num_flits, 0x29), + CMN_EVENT_CCLA(pfwd_sndr_stalls_static_crd, 0x2a), + CMN_EVENT_CCLA(pfwd_sndr_stalls_dynmaic_crd, 0x2b), + NULL }; @@ -1147,7 +1318,11 @@ static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn, if (!dn->occupid[fsel].count) { dn->occupid[fsel].val = occupid; - reg = FIELD_PREP(CMN__PMU_OCCUP1_ID, + reg = FIELD_PREP(CMN__PMU_CBUSY_SNTHROTTLE_SEL, + dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) | + FIELD_PREP(CMN__PMU_CLASS_OCCUP_ID, + dn->occupid[SEL_CLASS_OCCUP_ID].val) | + FIELD_PREP(CMN__PMU_OCCUP1_ID, dn->occupid[SEL_OCCUP1ID].val); writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4); } else if (dn->occupid[fsel].val != occupid) { @@ -1157,6 +1332,18 @@ static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn, return 0; } +static void arm_cmn_set_event_sel_lo(struct arm_cmn_node *dn, int dtm_idx, + int eventid, bool wide_sel) +{ + if (wide_sel) { + dn->event_w[dtm_idx] = eventid; + writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL); + } else { + dn->event[dtm_idx] = eventid; + writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL); + } +} + static void arm_cmn_event_start(struct perf_event *event, int flags) { struct arm_cmn *cmn = to_cmn(event->pmu); @@ -1183,8 +1370,8 @@ static void arm_cmn_event_start(struct perf_event *event, int flags) } else for_each_hw_dn(hw, dn, i) { int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); - dn->event[dtm_idx] = CMN_EVENT_EVENTID(event); - writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL); + arm_cmn_set_event_sel_lo(dn, dtm_idx, CMN_EVENT_EVENTID(event), + hw->wide_sel); } } @@ -1211,8 +1398,7 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) } else for_each_hw_dn(hw, dn, i) { int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); - dn->event[dtm_idx] = 0; - writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL); + arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel); } arm_cmn_event_read(event); @@ -1371,6 +1557,8 @@ static int arm_cmn_event_init(struct perf_event *event) /* ...but the DTM may depend on which port we're watching */ if (cmn->multi_dtm) hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2; + } else if (type == CMN_TYPE_XP && cmn->model == CMN700) { + hw->wide_sel = true; } /* This is sufficiently annoying to recalculate, so cache it */ @@ -1748,6 +1936,10 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */ if (dn->type == CMN_TYPE_RND) dn->type = CMN_TYPE_RNI; + + /* We split the RN-I off already, so let the CCLA part match CCLA events */ + if (dn->type == CMN_TYPE_CCLA_RNI) + dn->type = CMN_TYPE_CCLA; } writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL); @@ -1783,6 +1975,8 @@ static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type) switch (type) { case CMN_TYPE_HNP: return CMN_TYPE_HNI; + case CMN_TYPE_CCLA_RNI: + return CMN_TYPE_RNI; default: return CMN_TYPE_INVALID; } @@ -1812,6 +2006,10 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg); cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg); + reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL_1); + cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg); 
+ cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg); + reg = readq_relaxed(cfg_region + CMN_CHILD_INFO); child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg); @@ -1935,6 +2133,9 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_MTSX: case CMN_TYPE_CXRA: case CMN_TYPE_CXHA: + case CMN_TYPE_CCRA: + case CMN_TYPE_CCHA: + case CMN_TYPE_CCLA: dn++; break; /* Nothing to see here */ @@ -1950,6 +2151,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) * register via the normal mechanism later. */ case CMN_TYPE_HNP: + case CMN_TYPE_CCLA_RNI: dn[1] = dn[0]; dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL; dn[1].type = arm_cmn_subtype(dn->type); @@ -2125,6 +2327,7 @@ static int arm_cmn_remove(struct platform_device *pdev) static const struct of_device_id arm_cmn_of_match[] = { { .compatible = "arm,cmn-600", .data = (void *)CMN600 }, { .compatible = "arm,cmn-650", .data = (void *)CMN650 }, + { .compatible = "arm,cmn-700", .data = (void *)CMN700 }, { .compatible = "arm,ci-700", .data = (void *)CI700 }, {} }; @@ -2135,6 +2338,7 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match); static const struct acpi_device_id arm_cmn_acpi_match[] = { { "ARMHC600", CMN600 }, { "ARMHC650", CMN650 }, + { "ARMHC700", CMN700 }, {} }; MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match); From 47a9ed88a4fc6c67ea7f1d65df28d2eb6a89bbbb Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Fri, 29 Apr 2022 14:33:07 +0800 Subject: [PATCH 092/145] drivers/perf: arm_spe: Expose saturating counter to 16-bit In order to acquire more accurate latency, Armv8.8[1] has defined the CountSize field value 0b0011 to indicate 16-bit saturating counters. Let's support this new feature and expose it to userspace under sysfs. [1] https://developer.arm.com/documentation/ddi0487/latest Cc: Will Deacon Cc: Mark Rutland Signed-off-by: Shaokun Zhang Link: https://lore.kernel.org/r/20220429063307.63251-1-zhangshaokun@hisilicon.com Signed-off-by: Will Deacon --- drivers/perf/arm_spe_pmu.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index d44bcc29d99c..db670b265897 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -1035,6 +1035,9 @@ static void __arm_spe_pmu_dev_probe(void *info) fallthrough; case 2: spe_pmu->counter_sz = 12; + break; + case 3: + spe_pmu->counter_sz = 16; } dev_info(dev, From 807907dae9701c4b0593d5195d4839f17d103314 Mon Sep 17 00:00:00 2001 From: Qi Liu Date: Fri, 15 Apr 2022 18:23:51 +0800 Subject: [PATCH 093/145] drivers/perf: hisi: Associate PMUs in SICL with CPUs online If a PMU is in a SICL (Super IO cluster), it is not appropriate to associate this PMU with a CPU die. So we associate it with all CPUs online, rather than CPUs in the nearest SCCL. As the firmware of the Hip09 platform hasn't been published yet, this change to the PMU driver will not influence backwards compatibility between driver and firmware.
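In practice the association rule reduces to treating an SCCL ID of -1 as "no die affinity", as in the illustrative helper below. This is only a simplified sketch: the driver's real check also compares the SCCL/CCL IDs it reads from the current CPU for the non-SICL cases.

#include <stdbool.h>

/* Illustrative only: a SICL-attached PMU carries no die affinity */
bool pmu_cpu_is_associated(int pmu_sccl_id, int cpu_sccl_id)
{
	if (pmu_sccl_id == -1)		/* PMU sits in a SICL */
		return true;		/* any online CPU may manage it */

	return pmu_sccl_id == cpu_sccl_id;
}
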
Signed-off-by: Qi Liu Reviewed-by: John Garry Link: https://lore.kernel.org/r/20220415102352.6665-2-liuqi115@huawei.com Signed-off-by: Will Deacon --- drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 18 +++++++----------- drivers/perf/hisilicon/hisi_uncore_pmu.c | 4 ++++ drivers/perf/hisilicon/hisi_uncore_pmu.h | 1 + 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c index bad99d149172..a0ee84d97c41 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c @@ -258,13 +258,12 @@ static int hisi_pa_pmu_init_data(struct platform_device *pdev, struct hisi_pmu *pa_pmu) { /* - * Use the SCCL_ID and the index ID to identify the PA PMU, - * while SCCL_ID is the nearst SCCL_ID from this SICL and - * CPU core is chosen from this SCCL to manage this PMU. + * As PA PMU is in a SICL, use the SICL_ID and the index ID + * to identify the PA PMU. */ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", - &pa_pmu->sccl_id)) { - dev_err(&pdev->dev, "Cannot read sccl-id!\n"); + &pa_pmu->sicl_id)) { + dev_err(&pdev->dev, "Cannot read sicl-id!\n"); return -EINVAL; } @@ -275,6 +274,7 @@ static int hisi_pa_pmu_init_data(struct platform_device *pdev, } pa_pmu->ccl_id = -1; + pa_pmu->sccl_id = -1; pa_pmu->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pa_pmu->base)) { @@ -399,13 +399,9 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev) ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu); if (ret) return ret; - /* - * PA is attached in SICL and the CPU core is chosen to manage this - * PMU which is the nearest SCCL, while its SCCL_ID is greater than - * one with the SICL_ID. - */ + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u", - pa_pmu->sccl_id - 1, pa_pmu->index_id); + pa_pmu->sicl_id, pa_pmu->index_id); if (!name) return -ENOMEM; diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 358e4e284a62..980b9ee6eb14 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -458,6 +458,10 @@ static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu) { int sccl_id, ccl_id; + /* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */ + if (hisi_pmu->sccl_id == -1) + return true; + if (hisi_pmu->ccl_id == -1) { /* If CCL_ID is -1, the PMU only shares the same SCCL */ hisi_read_sccl_and_ccl_id(&sccl_id, NULL); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h index 7f5841d6f592..96eeddad55ff 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.h +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -81,6 +81,7 @@ struct hisi_pmu { struct device *dev; struct hlist_node node; int sccl_id; + int sicl_id; int ccl_id; void __iomem *base; /* the ID of the PMU modules */ From 6b79738b6ed91a2d0fe958819469eeedac3bca81 Mon Sep 17 00:00:00 2001 From: Qi Liu Date: Fri, 15 Apr 2022 18:23:52 +0800 Subject: [PATCH 094/145] drivers/perf: hisi: Add Support for CPA PMU On HiSilicon Hip09 platform, there is a CPA (Coherency Protocol Agent) on each SICL (Super IO Cluster) which implements packet format translation, route parsing and traffic statistics. CPA PMU has 8 PMU counters and interrupt is supported to handle counter overflow. Let's support its driver under the framework of HiSilicon PMU driver. 
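For orientation before the full driver below: the register layout boils down to eight 64-bit counters at an 8-byte stride from CPA_CNT0_LOWER, with the eight 8-bit event codes packed four per 32-bit CPA_EVENT_TYPEx register. The helpers here are an illustrative restatement of that arithmetic derived from the register definitions in this patch, not the driver's exact code.

#include <stdint.h>

#define CPA_EVENT_TYPE0	0x1c80
#define CPA_CNT0_LOWER	0x1d00
#define CPA_REG_OFFSET	0x8

/* 64-bit counter idx (0..7) lives at an 8-byte stride from CPA_CNT0_LOWER */
uint32_t cpa_counter_offset(int idx)
{
	return CPA_CNT0_LOWER + idx * CPA_REG_OFFSET;
}

/* Event-select register holding counter idx: TYPE0 for 0..3, TYPE1 for 4..7 */
uint32_t cpa_evtype_reg(int idx)
{
	return CPA_EVENT_TYPE0 + (idx / 4) * 4;
}

/* Bit position of the 8-bit event code within that register */
int cpa_evtype_shift(int idx)
{
	return (idx % 4) * 8;
}
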
Signed-off-by: Qi Liu Reviewed-by: John Garry Reviewed-by: Shaokun Zhang Link: https://lore.kernel.org/r/20220415102352.6665-3-liuqi115@huawei.com Signed-off-by: Will Deacon --- drivers/perf/hisilicon/Makefile | 2 +- drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c | 409 +++++++++++++++++++ include/linux/cpuhotplug.h | 1 + 3 files changed, 411 insertions(+), 1 deletion(-) create mode 100644 drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index 506ed39e3266..6be83517acaa 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \ hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \ - hisi_uncore_pa_pmu.o + hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c new file mode 100644 index 000000000000..a9bb73f76be4 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC CPA(Coherency Protocol Agent) hardware event counters support + * + * Copyright (C) 2022 HiSilicon Limited + * Author: Qi Liu + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + */ + +#define pr_fmt(fmt) "cpa pmu: " fmt +#include +#include +#include +#include +#include +#include +#include + +#include "hisi_uncore_pmu.h" + +/* CPA register definition */ +#define CPA_PERF_CTRL 0x1c00 +#define CPA_EVENT_CTRL 0x1c04 +#define CPA_INT_MASK 0x1c70 +#define CPA_INT_STATUS 0x1c78 +#define CPA_INT_CLEAR 0x1c7c +#define CPA_EVENT_TYPE0 0x1c80 +#define CPA_VERSION 0x1cf0 +#define CPA_CNT0_LOWER 0x1d00 +#define CPA_CFG_REG 0x0534 + +/* CPA operation command */ +#define CPA_PERF_CTRL_EN BIT_ULL(0) +#define CPA_EVTYPE_MASK 0xffUL +#define CPA_PM_CTRL BIT_ULL(9) + +/* CPA has 8-counters */ +#define CPA_NR_COUNTERS 0x8 +#define CPA_COUNTER_BITS 64 +#define CPA_NR_EVENTS 0xff +#define CPA_REG_OFFSET 0x8 + +static u32 hisi_cpa_pmu_get_counter_offset(int idx) +{ + return (CPA_CNT0_LOWER + idx * CPA_REG_OFFSET); +} + +static u64 hisi_cpa_pmu_read_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + return readq(cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_cpa_pmu_write_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writeq(val, cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_cpa_pmu_write_evtype(struct hisi_pmu *cpa_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(CPA_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. + * Event code is 8-bits and for the former 4 hardware counters, + * CPA_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * CPA_EVENT_TYPE1 is chosen. 
+ */ + reg = CPA_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = CPA_REG_OFFSET * reg_idx; + + /* Write event code to CPA_EVENT_TYPEx Register */ + val = readl(cpa_pmu->base + reg); + val &= ~(CPA_EVTYPE_MASK << shift); + val |= type << shift; + writel(val, cpa_pmu->base + reg); +} + +static void hisi_cpa_pmu_start_counters(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_PERF_CTRL); + val |= CPA_PERF_CTRL_EN; + writel(val, cpa_pmu->base + CPA_PERF_CTRL); +} + +static void hisi_cpa_pmu_stop_counters(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_PERF_CTRL); + val &= ~(CPA_PERF_CTRL_EN); + writel(val, cpa_pmu->base + CPA_PERF_CTRL); +} + +static void hisi_cpa_pmu_disable_pm(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_CFG_REG); + val |= CPA_PM_CTRL; + writel(val, cpa_pmu->base + CPA_CFG_REG); +} + +static void hisi_cpa_pmu_enable_pm(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_CFG_REG); + val &= ~(CPA_PM_CTRL); + writel(val, cpa_pmu->base + CPA_CFG_REG); +} + +static void hisi_cpa_pmu_enable_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in CPA_EVENT_CTRL register */ + val = readl(cpa_pmu->base + CPA_EVENT_CTRL); + val |= 1 << hwc->idx; + writel(val, cpa_pmu->base + CPA_EVENT_CTRL); +} + +static void hisi_cpa_pmu_disable_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in CPA_EVENT_CTRL register */ + val = readl(cpa_pmu->base + CPA_EVENT_CTRL); + val &= ~(1UL << hwc->idx); + writel(val, cpa_pmu->base + CPA_EVENT_CTRL); +} + +static void hisi_cpa_pmu_enable_counter_int(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(cpa_pmu->base + CPA_INT_MASK); + val &= ~(1UL << hwc->idx); + writel(val, cpa_pmu->base + CPA_INT_MASK); +} + +static void hisi_cpa_pmu_disable_counter_int(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(cpa_pmu->base + CPA_INT_MASK); + val |= 1 << hwc->idx; + writel(val, cpa_pmu->base + CPA_INT_MASK); +} + +static u32 hisi_cpa_pmu_get_int_status(struct hisi_pmu *cpa_pmu) +{ + return readl(cpa_pmu->base + CPA_INT_STATUS); +} + +static void hisi_cpa_pmu_clear_int_status(struct hisi_pmu *cpa_pmu, int idx) +{ + writel(1 << idx, cpa_pmu->base + CPA_INT_CLEAR); +} + +static const struct acpi_device_id hisi_cpa_pmu_acpi_match[] = { + { "HISI0281", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match); + +static int hisi_cpa_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *cpa_pmu) +{ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &cpa_pmu->sicl_id)) { + dev_err(&pdev->dev, "Can not read sicl-id\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id", + &cpa_pmu->index_id)) { + dev_err(&pdev->dev, "Cannot read idx-id\n"); + return -EINVAL; + } + + cpa_pmu->ccl_id = -1; + cpa_pmu->sccl_id = -1; + cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cpa_pmu->base)) + return PTR_ERR(cpa_pmu->base); + + cpa_pmu->identifier = readl(cpa_pmu->base + CPA_VERSION); + + return 0; +} + +static struct attribute *hisi_cpa_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-15"), + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_format_group = { + .name = "format", + .attrs = hisi_cpa_pmu_format_attr, +}; 
+ +static struct attribute *hisi_cpa_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(cpa_cycles, 0x00), + HISI_PMU_EVENT_ATTR(cpa_p1_wr_dat, 0x61), + HISI_PMU_EVENT_ATTR(cpa_p1_rd_dat, 0x62), + HISI_PMU_EVENT_ATTR(cpa_p0_wr_dat, 0xE1), + HISI_PMU_EVENT_ATTR(cpa_p0_rd_dat, 0xE2), + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_events_group = { + .name = "events", + .attrs = hisi_cpa_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = { + .attrs = hisi_cpa_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_cpa_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_cpa_pmu_identifier_attrs[] = { + &hisi_cpa_pmu_identifier_attr.attr, + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_identifier_group = { + .attrs = hisi_cpa_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = { + &hisi_cpa_pmu_format_group, + &hisi_cpa_pmu_events_group, + &hisi_cpa_pmu_cpumask_attr_group, + &hisi_cpa_pmu_identifier_group, + NULL +}; + +static const struct hisi_uncore_ops hisi_uncore_cpa_pmu_ops = { + .write_evtype = hisi_cpa_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_cpa_pmu_start_counters, + .stop_counters = hisi_cpa_pmu_stop_counters, + .enable_counter = hisi_cpa_pmu_enable_counter, + .disable_counter = hisi_cpa_pmu_disable_counter, + .enable_counter_int = hisi_cpa_pmu_enable_counter_int, + .disable_counter_int = hisi_cpa_pmu_disable_counter_int, + .write_counter = hisi_cpa_pmu_write_counter, + .read_counter = hisi_cpa_pmu_read_counter, + .get_int_status = hisi_cpa_pmu_get_int_status, + .clear_int_status = hisi_cpa_pmu_clear_int_status, +}; + +static int hisi_cpa_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *cpa_pmu) +{ + int ret; + + ret = hisi_cpa_pmu_init_data(pdev, cpa_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(cpa_pmu, pdev); + if (ret) + return ret; + + cpa_pmu->counter_bits = CPA_COUNTER_BITS; + cpa_pmu->check_event = CPA_NR_EVENTS; + cpa_pmu->pmu_events.attr_groups = hisi_cpa_pmu_attr_groups; + cpa_pmu->ops = &hisi_uncore_cpa_pmu_ops; + cpa_pmu->num_counters = CPA_NR_COUNTERS; + cpa_pmu->dev = &pdev->dev; + cpa_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_cpa_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *cpa_pmu; + char *name; + int ret; + + cpa_pmu = devm_kzalloc(&pdev->dev, sizeof(*cpa_pmu), GFP_KERNEL); + if (!cpa_pmu) + return -ENOMEM; + + ret = hisi_cpa_pmu_dev_probe(pdev, cpa_pmu); + if (ret) + return ret; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u", + cpa_pmu->sicl_id, cpa_pmu->index_id); + if (!name) + return -ENOMEM; + + cpa_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = cpa_pmu->pmu_events.attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + /* Power Management should be disabled before using CPA PMU. 
*/ + hisi_cpa_pmu_disable_pm(cpa_pmu); + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, + &cpa_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + hisi_cpa_pmu_enable_pm(cpa_pmu); + return ret; + } + + ret = perf_pmu_register(&cpa_pmu->pmu, name, -1); + if (ret) { + dev_err(cpa_pmu->dev, "PMU register failed\n"); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node); + hisi_cpa_pmu_enable_pm(cpa_pmu); + return ret; + } + + platform_set_drvdata(pdev, cpa_pmu); + return ret; +} + +static int hisi_cpa_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&cpa_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, + &cpa_pmu->node); + hisi_cpa_pmu_enable_pm(cpa_pmu); + return 0; +} + +static struct platform_driver hisi_cpa_pmu_driver = { + .driver = { + .name = "hisi_cpa_pmu", + .acpi_match_table = ACPI_PTR(hisi_cpa_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = hisi_cpa_pmu_probe, + .remove = hisi_cpa_pmu_remove, +}; + +static int __init hisi_cpa_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, + "AP_PERF_ARM_HISI_CPA_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("setup hotplug failed: %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_cpa_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE); + + return ret; +} +module_init(hisi_cpa_pmu_module_init); + +static void __exit hisi_cpa_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_cpa_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE); +} +module_exit(hisi_cpa_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Qi Liu "); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 82e33137f917..b66c5f389159 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -222,6 +222,7 @@ enum cpuhp_state { CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, From d158a0608eb85f29f98357b97d01b80250636613 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 5 May 2022 23:15:17 +0100 Subject: [PATCH 095/145] arm64/sme: More sensibly define the size for the ZA register set Since the vector length configuration mechanism is identical between SVE and SME we share large elements of the code including the definition for the maximum vector length. Unfortunately when we were defining the ABI for SVE we included not only the actual maximum vector length of 2048 bits but also the value possible if all the bits reserved in the architecture for expansion of the LEN field were used, 16384 bits. This starts creating problems if we try to allocate anything for the ZA matrix based on the maximum possible vector length, as we do for the regset used with ptrace during the process of generating a core dump. While the maximum potential size for ZA with the current architecture is a reasonably manageable 64K, with the higher reserved limit ZA would be 64M, which leads to entirely reasonable complaints from the memory management code when we try to allocate a buffer of that size.
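As an illustrative back-of-the-envelope check of the figures above (assuming the usual 16-byte vector quadword): ZA is a square array of bytes, one SVL-byte row per byte of SVL, so the real architectural maximum of 16 quadwords gives the 64K figure, and the same arithmetic at the reserved-bits maximum produces the 64M that upsets the allocator.

#include <stdio.h>

#define SVE_VQ_BYTES	16	/* one quadword (128 bits) per VQ */

int main(void)
{
	unsigned int vq = 16;				/* the real architectural limit (SME_VQ_MAX below) */
	unsigned long row = vq * SVE_VQ_BYTES;		/* 256-byte rows */
	unsigned long za = row * row;			/* square matrix of bytes */

	printf("ZA payload at %lu-byte VL: %lu KiB\n", row, za / 1024);	/* 64 KiB */
	return 0;
}
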
Avoid these issues by defining the actual maximum vector length for the architecture and using it for the SME regsets. Also use the full ZA_PT_SIZE() with the header rather than just the actual register payload when specifying the size, fixing support for the largest vector lengths now that we have this new, lower define. With the SVE maximum this did not cause problems due to the extra headroom we had. While we're at it add a comment clarifying why even though ZA is a single register we tell the regset code that it is a multi-register regset. Reported-by: Qian Cai Signed-off-by: Mark Brown Tested-by: Naresh Kamboju Link: https://lore.kernel.org/r/20220505221517.1642014-1-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 12 ++++++++++++ arch/arm64/kernel/ptrace.c | 12 ++++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 5afcd0709aae..75caa2098d5b 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -32,6 +32,18 @@ #define VFP_STATE_SIZE ((32 * 8) + 4) #endif +/* + * When we defined the maximum SVE vector length we defined the ABI so + * that the maximum vector length included all the reserved for future + * expansion bits in ZCR rather than those just currently defined by + * the architecture. While SME follows a similar pattern the fact that + * it includes a square matrix means that any allocations that attempt + * to cover the maximum potential vector length (such as happen with + * the regset used for ptrace) end up being extremely large. Define + * the much lower actual limit for use in such situations. + */ +#define SME_VQ_MAX 16 + struct task_struct; extern void fpsimd_save_state(struct user_fpsimd_state *state); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 47d8a7472171..60ebc3060cf1 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1438,7 +1438,7 @@ static const struct user_regset aarch64_regsets[] = { #ifdef CONFIG_ARM64_SME [REGSET_SSVE] = { /* Streaming mode SVE */ .core_note_type = NT_ARM_SSVE, - .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE), + .n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE), SVE_VQ_BYTES), .size = SVE_VQ_BYTES, .align = SVE_VQ_BYTES, @@ -1447,7 +1447,15 @@ static const struct user_regset aarch64_regsets[] = { }, [REGSET_ZA] = { /* SME ZA */ .core_note_type = NT_ARM_ZA, - .n = DIV_ROUND_UP(ZA_PT_ZA_SIZE(SVE_VQ_MAX), SVE_VQ_BYTES), + /* + * ZA is a single register but it's variably sized and + * the ptrace core requires that the size of any data + * be an exact multiple of the configured register + * size so report as though we had SVE_VQ_BYTES + * registers. These values aren't exposed to + * userspace. + */ + .n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES), .size = SVE_VQ_BYTES, .align = SVE_VQ_BYTES, .regset_get = za_get, From 2e5920bb073a4e3e69cf8e581836cafc8ba1b464 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Fri, 6 May 2022 19:43:57 +0800 Subject: [PATCH 096/145] kdump: return -ENOENT if required cmdline option does not exist According to the current crashkernel=Y,low support in other ARCHes, it's an optional command-line option. When it doesn't exist, kernel will try to allocate minimum required memory below 4G automatically. However, __parse_crashkernel() returns '-EINVAL' for all error cases. It can't distinguish the nonexistent option from invalid option. 
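The distinction matters because a caller wants to react differently to the two cases, roughly along the lines of the sketch below. This is an illustrative caller pattern rather than code taken from the kernel, and the default size shown is an assumed example value.

#include <errno.h>

#define DEFAULT_CRASH_LOW_SIZE	(256UL << 20)	/* assumed example default, not the kernel's value */

int choose_low_size(int parse_ret, unsigned long parsed, unsigned long *low_size)
{
	if (parse_ret == -ENOENT) {		/* crashkernel=...,low absent: fall back to default */
		*low_size = DEFAULT_CRASH_LOW_SIZE;
		return 0;
	}
	if (parse_ret)				/* present but invalid: bail out */
		return parse_ret;

	*low_size = parsed;			/* present and valid */
	return 0;
}
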
Change __parse_crashkernel() to return '-ENOENT' for the nonexistent option case. With this change, crashkernel,low memory will take the default value if crashkernel=,low is not specified; while crashkernel reservation will fail and bail out if an invalid option is specified. Signed-off-by: Zhen Lei Acked-by: Baoquan He Link: https://lore.kernel.org/r/20220506114402.365-2-thunder.leizhen@huawei.com Signed-off-by: Catalin Marinas --- kernel/crash_core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 256cf6db573c..4d57c03714f4 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -243,9 +243,8 @@ static int __init __parse_crashkernel(char *cmdline, *crash_base = 0; ck_cmdline = get_last_crashkernel(cmdline, name, suffix); - if (!ck_cmdline) - return -EINVAL; + return -ENOENT; ck_cmdline += strlen(name); From e6b394425c615d1596ce7d9de23a3a34ee2e612b Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Fri, 6 May 2022 19:43:58 +0800 Subject: [PATCH 097/145] arm64: Use insert_resource() to simplify code insert_resource() traverses the subtree layer by layer from the root node until a proper location is found. Compared with request_resource(), the parent node does not need to be determined in advance. In addition, move the insertion of node 'crashk_res' into function reserve_crashkernel() to make the associated code close together. Signed-off-by: Zhen Lei Acked-by: John Donnelly Acked-by: Baoquan He Link: https://lore.kernel.org/r/20220506114402.365-3-thunder.leizhen@huawei.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/setup.c | 17 +++-------------- arch/arm64/mm/init.c | 1 + 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 3505789cf4bd..fea3223704b6 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -225,6 +225,8 @@ static void __init request_standard_resources(void) kernel_code.end = __pa_symbol(__init_begin - 1); kernel_data.start = __pa_symbol(_sdata); kernel_data.end = __pa_symbol(_end - 1); + insert_resource(&iomem_resource, &kernel_code); + insert_resource(&iomem_resource, &kernel_data); num_standard_resources = memblock.memory.cnt; res_size = num_standard_resources * sizeof(*standard_resources); @@ -246,20 +248,7 @@ static void __init request_standard_resources(void) res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; } - request_resource(&iomem_resource, res); - - if (kernel_code.start >= res->start && - kernel_code.end <= res->end) - request_resource(res, &kernel_code); - if (kernel_data.start >= res->start && - kernel_data.end <= res->end) - request_resource(res, &kernel_data); -#ifdef CONFIG_KEXEC_CORE - /* Userspace will find "Crash kernel" region in /proc/iomem. 
*/ - if (crashk_res.end && crashk_res.start >= res->start && - crashk_res.end <= res->end) - request_resource(res, &crashk_res); -#endif + insert_resource(&iomem_resource, res); } } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1e7b1550e2fc..51863f1448c6 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -137,6 +137,7 @@ static void __init reserve_crashkernel(void) kmemleak_ignore_phys(crash_base); crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); } /* From 944a45abfabc171fd121315ff0d5e62b11cb5d6f Mon Sep 17 00:00:00 2001 From: Chen Zhou Date: Fri, 6 May 2022 19:43:59 +0800 Subject: [PATCH 098/145] arm64: kdump: Reimplement crashkernel=X There are following issues in arm64 kdump: 1. We use crashkernel=X to reserve crashkernel in DMA zone, which will fail when there is not enough low memory. 2. If reserving crashkernel above DMA zone, in this case, crash dump kernel will fail to boot because there is no low memory available for allocation. To solve these issues, introduce crashkernel=X,[high,low]. The "crashkernel=X,high" is used to select a region above DMA zone, and the "crashkernel=Y,low" is used to allocate specified size low memory. Signed-off-by: Chen Zhou Co-developed-by: Zhen Lei Signed-off-by: Zhen Lei Link: https://lore.kernel.org/r/20220506114402.365-4-thunder.leizhen@huawei.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/machine_kexec.c | 9 +++- arch/arm64/kernel/machine_kexec_file.c | 12 ++++- arch/arm64/mm/init.c | 63 +++++++++++++++++++++++--- 3 files changed, 74 insertions(+), 10 deletions(-) diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index e16b248699d5..19c2d487cb08 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -329,8 +329,13 @@ bool crash_is_nosave(unsigned long pfn) /* in reserved memory? 
*/ addr = __pfn_to_phys(pfn); - if ((addr < crashk_res.start) || (crashk_res.end < addr)) - return false; + if ((addr < crashk_res.start) || (crashk_res.end < addr)) { + if (!crashk_low_res.end) + return false; + + if ((addr < crashk_low_res.start) || (crashk_low_res.end < addr)) + return false; + } if (!kexec_crash_image) return true; diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 59c648d51848..889951291cc0 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -65,10 +65,18 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) /* Exclude crashkernel region */ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); + if (ret) + goto out; - if (!ret) - ret = crash_prepare_elf64_headers(cmem, true, addr, sz); + if (crashk_low_res.end) { + ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); + if (ret) + goto out; + } + ret = crash_prepare_elf64_headers(cmem, true, addr, sz); + +out: kfree(cmem); return ret; } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 51863f1448c6..18ba66c90991 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -90,6 +90,32 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit; phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1; #endif +/* Current arm64 boot protocol requires 2MB alignment */ +#define CRASH_ALIGN SZ_2M + +#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit +#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1) + +static int __init reserve_crashkernel_low(unsigned long long low_size) +{ + unsigned long long low_base; + + low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX); + if (!low_base) { + pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size); + return -ENOMEM; + } + + pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n", + low_base, low_base + low_size, low_size >> 20); + + crashk_low_res.start = low_base; + crashk_low_res.end = low_base + low_size - 1; + insert_resource(&iomem_resource, &crashk_low_res); + + return 0; +} + /* * reserve_crashkernel() - reserves memory for crash kernel * @@ -100,17 +126,35 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1; static void __init reserve_crashkernel(void) { unsigned long long crash_base, crash_size; - unsigned long long crash_max = arm64_dma_phys_limit; + unsigned long long crash_low_size = 0; + unsigned long long crash_max = CRASH_ADDR_LOW_MAX; + char *cmdline = boot_command_line; int ret; if (!IS_ENABLED(CONFIG_KEXEC_CORE)) return; - ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + /* crashkernel=X[@offset] */ + ret = parse_crashkernel(cmdline, memblock_phys_mem_size(), &crash_size, &crash_base); - /* no crashkernel= or invalid value specified */ - if (ret || !crash_size) + if (ret == -ENOENT) { + ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base); + if (ret || !crash_size) + return; + + /* + * crashkernel=Y,low can be specified or not, but invalid value + * is not allowed. 
+ */ + ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base); + if (ret && (ret != -ENOENT)) + return; + + crash_max = CRASH_ADDR_HIGH_MAX; + } else if (ret || !crash_size) { + /* The specified value is invalid */ return; + } crash_size = PAGE_ALIGN(crash_size); @@ -118,8 +162,7 @@ static void __init reserve_crashkernel(void) if (crash_base) crash_max = crash_base + crash_size; - /* Current arm64 boot protocol requires 2MB alignment */ - crash_base = memblock_phys_alloc_range(crash_size, SZ_2M, + crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, crash_base, crash_max); if (!crash_base) { pr_warn("cannot allocate crashkernel (size:0x%llx)\n", @@ -127,6 +170,11 @@ static void __init reserve_crashkernel(void) return; } + if (crash_low_size && reserve_crashkernel_low(crash_low_size)) { + memblock_phys_free(crash_base, crash_size); + return; + } + pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n", crash_base, crash_base + crash_size, crash_size >> 20); @@ -135,6 +183,9 @@ static void __init reserve_crashkernel(void) * map. Inform kmemleak so that it won't try to access it. */ kmemleak_ignore_phys(crash_base); + if (crashk_low_res.end) + kmemleak_ignore_phys(crashk_low_res.start); + crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; insert_resource(&iomem_resource, &crashk_res); From fb319e77a0e70b9ccfef87827d34b10d6bc2ccce Mon Sep 17 00:00:00 2001 From: Chen Zhou Date: Fri, 6 May 2022 19:44:00 +0800 Subject: [PATCH 099/145] of: fdt: Add memory for devices by DT property "linux,usable-memory-range" When reserving crashkernel in high memory, some low memory is reserved for crash dump kernel devices and never mapped by the first kernel. This memory range is advertised to crash dump kernel via DT property under /chosen, linux,usable-memory-range = We reused the DT property linux,usable-memory-range and made the low memory region as the second range "BASE2 SIZE2", which keeps compatibility with existing user-space and older kdump kernels. Crash dump kernel reads this property at boot time and call memblock_add() to add the low memory region after memblock_cap_memory_range() has been called. Signed-off-by: Chen Zhou Co-developed-by: Zhen Lei Signed-off-by: Zhen Lei Reviewed-by: Rob Herring Tested-by: Dave Kleikamp Acked-by: Baoquan He Link: https://lore.kernel.org/r/20220506114402.365-5-thunder.leizhen@huawei.com Signed-off-by: Catalin Marinas --- drivers/of/fdt.c | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index ec315b060cd5..2f248d0acc04 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -973,16 +973,24 @@ static void __init early_init_dt_check_for_elfcorehdr(unsigned long node) static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND; +/* + * The main usage of linux,usable-memory-range is for crash dump kernel. + * Originally, the number of usable-memory regions is one. Now there may + * be two regions, low region and high region. + * To make compatibility with existing user-space and older kdump, the low + * region is always the last range of linux,usable-memory-range if exist. 
+ */ +#define MAX_USABLE_RANGES 2 + /** * early_init_dt_check_for_usable_mem_range - Decode usable memory range * location from flat tree */ void __init early_init_dt_check_for_usable_mem_range(void) { - const __be32 *prop; - int len; - phys_addr_t cap_mem_addr; - phys_addr_t cap_mem_size; + struct memblock_region rgn[MAX_USABLE_RANGES] = {0}; + const __be32 *prop, *endp; + int len, i; unsigned long node = chosen_node_offset; if ((long)node < 0) @@ -991,16 +999,21 @@ void __init early_init_dt_check_for_usable_mem_range(void) pr_debug("Looking for usable-memory-range property... "); prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len); - if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells))) + if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells))) return; - cap_mem_addr = dt_mem_next_cell(dt_root_addr_cells, &prop); - cap_mem_size = dt_mem_next_cell(dt_root_size_cells, &prop); + endp = prop + (len / sizeof(__be32)); + for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) { + rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop); + rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop); - pr_debug("cap_mem_start=%pa cap_mem_size=%pa\n", &cap_mem_addr, - &cap_mem_size); + pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n", + i, &rgn[i].base, &rgn[i].size); + } - memblock_cap_memory_range(cap_mem_addr, cap_mem_size); + memblock_cap_memory_range(rgn[0].base, rgn[0].size); + for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++) + memblock_add(rgn[i].base, rgn[i].size); } #ifdef CONFIG_SERIAL_EARLYCON From 8af6b91f58341325bf74ecb0389ddc0039091d84 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Fri, 6 May 2022 19:44:01 +0800 Subject: [PATCH 100/145] of: Support more than one crash kernel regions for kexec -s When "crashkernel=X,high" is used, there may be two crash regions: high=crashk_res and low=crashk_low_res. But now the syscall kexec_file_load() only add crashk_res into "linux,usable-memory-range", this may cause the second kernel to have no available dma memory. Fix it like kexec-tools does for option -c, add both 'high' and 'low' regions into the dtb. Signed-off-by: Zhen Lei Acked-by: Rob Herring Acked-by: Baoquan He Link: https://lore.kernel.org/r/20220506114402.365-6-thunder.leizhen@huawei.com Signed-off-by: Catalin Marinas --- drivers/of/kexec.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index b9bd1cff1793..8d374cc552be 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -386,6 +386,15 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, crashk_res.end - crashk_res.start + 1); if (ret) goto out; + + if (crashk_low_res.end) { + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, + "linux,usable-memory-range", + crashk_low_res.start, + crashk_low_res.end - crashk_low_res.start + 1); + if (ret) + goto out; + } } /* add bootargs */ From 5832f1ae50600ac6b2b6d00cfef42d33a9473f06 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Fri, 6 May 2022 19:44:02 +0800 Subject: [PATCH 101/145] docs: kdump: Update the crashkernel description for arm64 Now arm64 has added support for "crashkernel=X,high" and "crashkernel=Y,low". Unlike x86, crash low memory is not allocated if "crashkernel=Y,low" is not specified. 
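For example (sizes purely illustrative), both options can be passed together on the kernel command line:

	crashkernel=2G,high crashkernel=256M,low

and on arm64 omitting the ",low" option simply means no low memory is reserved.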
Signed-off-by: Zhen Lei Acked-by: Baoquan He Link: https://lore.kernel.org/r/20220506114402.365-7-thunder.leizhen@huawei.com Signed-off-by: Catalin Marinas --- Documentation/admin-guide/kernel-parameters.txt | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 3f1cc5e317ed..f6ff55840751 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -808,7 +808,7 @@ Documentation/admin-guide/kdump/kdump.rst for an example. crashkernel=size[KMG],high - [KNL, X86-64] range could be above 4G. Allow kernel + [KNL, X86-64, ARM64] range could be above 4G. Allow kernel to allocate physical memory region from top, so could be above 4G if system have more than 4G ram installed. Otherwise memory region will be allocated below 4G, if @@ -821,7 +821,7 @@ that require some amount of low memory, e.g. swiotlb requires at least 64M+32K low memory, also enough extra low memory is needed to make sure DMA buffers for 32-bit - devices won't run out. Kernel would try to allocate at + devices won't run out. Kernel would try to allocate at least 256M below 4G automatically. This one let user to specify own low range under 4G for second kernel instead. @@ -829,6 +829,11 @@ It will be ignored when crashkernel=X,high is not used or memory reserved is below 4G. + [KNL, ARM64] range in low memory. + This one lets the user specify a low range in the + DMA zone for the crash dump kernel. + It will be ignored when crashkernel=X,high is not used. + cryptomgr.notests [KNL] Disable crypto self-tests From 33835e8dfb3c79821cdc6e2a9b48ae05bd4820dc Mon Sep 17 00:00:00 2001 From: Tanmay Jagdale Date: Tue, 10 May 2022 15:56:57 +0530 Subject: [PATCH 102/145] perf/marvell_cn10k: Fix tad_pmu_event_init() to check pmu type first Make sure to check the pmu type first and then check event->attr.disabled. Doing so would avoid reading the disabled attribute of an event that is not handled by TAD PMU. Signed-off-by: Tanmay Jagdale Link: https://lore.kernel.org/r/20220510102657.487539-1-tanmay@marvell.com Signed-off-by: Will Deacon --- drivers/perf/marvell_cn10k_tad_pmu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c index ee67305f822d..282d3a071a67 100644 --- a/drivers/perf/marvell_cn10k_tad_pmu.c +++ b/drivers/perf/marvell_cn10k_tad_pmu.c @@ -146,12 +146,12 @@ static int tad_pmu_event_init(struct perf_event *event) { struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); - if (!event->attr.disabled) - return -EINVAL; - if (event->attr.type != event->pmu->type) return -ENOENT; + if (!event->attr.disabled) + return -EINVAL; + if (event->state != PERF_EVENT_STATE_OFF) return -EINVAL; From 3630b2a86390b4be907d8685b2fddee0dd73a835 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 10 May 2022 22:23:08 +0100 Subject: [PATCH 103/145] perf/arm-cmn: Fix filter_sel lookup Carefully considering the bounds of an array is all well and good, until you forget that that array also contains a NULL sentinel at the end and dereference it. So close... 
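The underlying pattern, sketched with a placeholder name rather than the driver's real table: sysfs attribute arrays end with a NULL sentinel, so a loop over all ARRAY_SIZE() slots hands that sentinel to container_of() and then dereferences the resulting bogus pointer.

	static struct attribute *example_event_attrs[] = {
		/* ... &event->attr.attr entries ... */
		NULL,	/* sentinel expected by the attribute group code */
	};
	/* Iterating i < ARRAY_SIZE(example_event_attrs) also visits the NULL slot. */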
Reported-by: Qian Cai Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/bebba768156aa3c0757140457bdd0fec10819388.1652217788.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/perf/arm-cmn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 62f3842d1a47..a8421a43740c 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -1514,7 +1514,7 @@ static enum cmn_filter_select arm_cmn_filter_sel(enum cmn_model model, struct arm_cmn_event_attr *e; int i; - for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs); i++) { + for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) { e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr); if (e->model & model && e->type == type && e->eventid == eventid) return e->fsel; From 710c8d6c026c0bbbd5d9036ef210e263edeb07d3 Mon Sep 17 00:00:00 2001 From: Linu Cherian Date: Mon, 9 May 2022 10:02:21 +0530 Subject: [PATCH 104/145] arm64: Declare non global symbols as static Fix below sparse warnings introduced while adding errata. arch/arm64/kernel/cpu_errata.c:218:25: sparse: warning: symbol 'cavium_erratum_23154_cpus' was not declared. Should it be static? Reported-by: kernel test robot Signed-off-by: Linu Cherian Acked-by: Will Deacon Link: https://lore.kernel.org/r/20220509043221.16361-1-lcherian@marvell.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpu_errata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 4c9b5b4b7a0b..49f4863c6c56 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -215,7 +215,7 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = { #endif #ifdef CONFIG_CAVIUM_ERRATUM_23154 -const struct midr_range cavium_erratum_23154_cpus[] = { +static const struct midr_range cavium_erratum_23154_cpus[] = { MIDR_ALL_VERSIONS(MIDR_THUNDERX), MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX), MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX), From fb396bb459c1fa3920dd8a9d84680398c65fed75 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 10 May 2022 10:09:30 +0530 Subject: [PATCH 105/145] arm64/hugetlb: Drop TLB flush from get_clear_flush() This drops now redundant TLB flush in get_clear_flush() which is no longer required after recent commit 697a1d44af8b ("tlb: hugetlb: Add more sizes to tlb_remove_huge_tlb_entry"). It also renames this function i.e dropping off '_flush' and replacing it with '__contig' as appropriate. Cc: Will Deacon Cc: Mike Kravetz Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220510043930.2410985-1-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/mm/hugetlbpage.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index cbace1c9e137..749435b01a89 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -166,15 +166,14 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize) * * This helper performs the break step. 
*/ -static pte_t get_clear_flush(struct mm_struct *mm, +static pte_t get_clear_contig(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long pgsize, unsigned long ncontig) { pte_t orig_pte = huge_ptep_get(ptep); - bool valid = pte_valid(orig_pte); - unsigned long i, saddr = addr; + unsigned long i; for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { pte_t pte = ptep_get_and_clear(mm, addr, ptep); @@ -190,11 +189,6 @@ static pte_t get_clear_flush(struct mm_struct *mm, if (pte_young(pte)) orig_pte = pte_mkyoung(orig_pte); } - - if (valid) { - struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); - flush_tlb_range(&vma, saddr, addr); - } return orig_pte; } @@ -392,7 +386,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, ncontig = find_num_contig(mm, addr, ptep, &pgsize); - return get_clear_flush(mm, addr, ptep, pgsize, ncontig); + return get_clear_contig(mm, addr, ptep, pgsize, ncontig); } /* @@ -443,7 +437,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, if (!__cont_access_flags_changed(ptep, pte, ncontig)) return 0; - orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig); + orig_pte = get_clear_contig(vma->vm_mm, addr, ptep, pgsize, ncontig); /* Make sure we don't lose the dirty or young state */ if (pte_dirty(orig_pte)) @@ -476,7 +470,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, ncontig = find_num_contig(mm, addr, ptep, &pgsize); dpfn = pgsize >> PAGE_SHIFT; - pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig); + pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig); pte = pte_wrprotect(pte); hugeprot = pte_pgprot(pte); From c5781212985a76ae610d18429388f9ec6ee3f77b Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 11 May 2022 14:12:53 +0100 Subject: [PATCH 106/145] perf/arm-cmn: Decode CAL devices properly in debugfs The debugfs code is lazy, and since it only keeps the bottom byte of each connect_info register to save space, it also treats the whole thing as the device_type since the other bits were reserved anyway. Upon closer inspection, though, this is no longer true on newer IP versions, so let's be good and decode the exact field properly. This should help it not get confused when a Component Aggregation Layer is present (which is already implied if Node IDs are found for both device addresses represented by the next two lines of the table). 
Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/6a13a6128a28cfe2eec6d09cf372a167ec9c3b65.1652274773.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/perf/arm-cmn.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index a8421a43740c..80d8309652a4 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -63,6 +63,7 @@ #define CMN_MXP__CONNECT_INFO_P3 0x0030 #define CMN_MXP__CONNECT_INFO_P4 0x0038 #define CMN_MXP__CONNECT_INFO_P5 0x0040 +#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0) /* PMU registers occupy the 3rd 4KB page of each node's region */ #define CMN_PMU_OFFSET 0x2000 @@ -400,7 +401,7 @@ static struct dentry *arm_cmn_debugfs; #ifdef CONFIG_DEBUG_FS static const char *arm_cmn_device_type(u8 type) { - switch(type) { + switch(FIELD_GET(CMN__CONNECT_INFO_DEVICE_TYPE, type)) { case 0x00: return " |"; case 0x01: return " RN-I |"; case 0x02: return " RN-D |"; From 82bf59002e0f84e51c16589080c2feba6e6ec78a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 May 2022 18:41:17 +0100 Subject: [PATCH 107/145] arm64/sysreg: improve comment for regs without fields Currently for registers without fields we create a comment pointing at the common definitions, e.g. | #define REG_TTBR0_EL1 S3_0_C2_C0_0 | #define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) | #define SYS_TTBR0_EL1_Op0 3 | #define SYS_TTBR0_EL1_Op1 0 | #define SYS_TTBR0_EL1_CRn 2 | #define SYS_TTBR0_EL1_CRm 0 | #define SYS_TTBR0_EL1_Op2 0 | | /* See TTBRx_EL1 */ It would be slightly nicer if the comment said what we should be looking for, e.g. | #define REG_TTBR0_EL1 S3_0_C2_C0_0 | #define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) | #define SYS_TTBR0_EL1_Op0 3 | #define SYS_TTBR0_EL1_Op1 0 | #define SYS_TTBR0_EL1_CRn 2 | #define SYS_TTBR0_EL1_CRm 0 | #define SYS_TTBR0_EL1_Op2 0 | | /* For TTBR0_EL1 fields see TTBRx_EL1 */ Update the comment generation accordingly. Signed-off-by: Mark Rutland Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20220513174118.266966-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/tools/gen-sysreg.awk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk index 3ffd77cbb499..f41feb87d0ca 100755 --- a/arch/arm64/tools/gen-sysreg.awk +++ b/arch/arm64/tools/gen-sysreg.awk @@ -184,7 +184,7 @@ END { if (next_bit != 63) fatal("Some fields already defined for " reg) - print "/* See " $2 " */" + print "/* For " reg " fields see " $2 " */" print "" next_bit = 0 From 5005d1dbbb3828078f32dff24b77866502e45e93 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 May 2022 18:41:18 +0100 Subject: [PATCH 108/145] arm64/sysreg: fix odd line spacing Between the header and the definitions, there's no line gap, and in a couple of places a double line gap for no semantic reason, which makes the output look a little odd. Fix this so blocks are consistently separated with a single line gap: * Add a newline after the "Generated file" comment line, so this is clearly split from whatever the first definition in the file is. * At the start of a SysregFields block there's no need for a newline as we haven't output any sysreg encoding details prior to this. * At the end of a Sysreg block there's no need for a newline if we have no RES0 or RES1 fields, as there will be a line gap after the previous element (e.g. a Fields line). There should be no functional change as a result of this patch. 
Signed-off-by: Mark Rutland Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20220513174118.266966-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/tools/gen-sysreg.awk | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk index f41feb87d0ca..4aa7ff8ce707 100755 --- a/arch/arm64/tools/gen-sysreg.awk +++ b/arch/arm64/tools/gen-sysreg.awk @@ -77,6 +77,7 @@ BEGIN { print "#define __ASM_SYSREG_DEFS_H" print "" print "/* Generated file - do not edit */" + print "" block = "None" } @@ -98,8 +99,6 @@ END { res0 = "UL(0)" res1 = "UL(0)" - print "" - next_bit = 63 next @@ -162,7 +161,8 @@ END { define(reg "_RES0", "(" res0 ")") if (res1 != null) define(reg "_RES1", "(" res1 ")") - print "" + if (res0 != null || res1 != null) + print "" reg = null op0 = null From 9a5681710740e496ee8b08004ddf2c212b76b36a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:45:16 +0100 Subject: [PATCH 109/145] selftests/arm64: Log errors in verify_mte_pointer_validity() When we detect a problem in verify_mte_pointer_validity() while checking tags we don't log what the problem was which makes debugging harder. Add some diagnostics. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220510164520.768783-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- .../selftests/arm64/mte/check_tags_inclusion.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c index deaef1f61076..b906914997ce 100644 --- a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c +++ b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c @@ -25,8 +25,11 @@ static int verify_mte_pointer_validity(char *ptr, int mode) /* Check the validity of the tagged pointer */ memset((void *)ptr, '1', BUFFER_SIZE); mte_wait_after_trig(); - if (cur_mte_cxt.fault_valid) + if (cur_mte_cxt.fault_valid) { + ksft_print_msg("Unexpected fault recorded for %p-%p in mode %x\n", + ptr, ptr + BUFFER_SIZE, mode); return KSFT_FAIL; + } /* Proceed further for nonzero tags */ if (!MT_FETCH_TAG((uintptr_t)ptr)) return KSFT_PASS; @@ -34,10 +37,13 @@ static int verify_mte_pointer_validity(char *ptr, int mode) /* Check the validity outside the range */ ptr[BUFFER_SIZE] = '2'; mte_wait_after_trig(); - if (!cur_mte_cxt.fault_valid) + if (!cur_mte_cxt.fault_valid) { + ksft_print_msg("No valid fault recorded for %p in mode %x\n", + ptr, mode); return KSFT_FAIL; - else + } else { return KSFT_PASS; + } } static int check_single_included_tags(int mem_type, int mode) From ffc8274c21938b30b10fcd6d4fc0feb29c222955 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:45:17 +0100 Subject: [PATCH 110/145] selftests/arm64: Allow zero tags in mte_switch_mode() mte_switch_mode() currently rejects attempts to set a zero tag however there are tests such as check_tags_inclusion which attempt to cover cases with zero tags using mte_switch_mode(). Since it is not clear why we are rejecting zero tags change the test to accept them. The issue has not previously been as apparent as it should be since the return value of mte_switch_mode() was not always checked in the callers and the tests weren't otherwise failing. 
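Concretely, assuming the selftest's usual definitions where MT_INCLUDE_TAG_MASK covers bits 0-15 (illustrative values below, not calls added by this patch):

	mte_switch_mode(mode, 0x0);	/* zero tag only: now accepted    */
	mte_switch_mode(mode, 0xffff);	/* all tags: accepted as before   */
	mte_switch_mode(mode, 0x10000);	/* bit outside the mask: -EINVAL  */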
Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220510164520.768783-3-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/mte/mte_common_util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c index 260206f4dce0..6ff4c4bcbff1 100644 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c @@ -283,7 +283,7 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask) return -EINVAL; } - if (!(incl_mask <= MTE_ALLOW_NON_ZERO_TAG)) { + if (incl_mask & ~MT_INCLUDE_TAG_MASK) { ksft_print_msg("FAIL: Invalid incl_mask %lx\n", incl_mask); return -EINVAL; } From 72d6771cb1734a2f32308c34c61043595e4bcb41 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:45:18 +0100 Subject: [PATCH 111/145] selftests/arm64: Check failures to set tags in check_tags_inclusion The MTE check_tags_inclusion test uses the mte_switch_mode() helper but ignores the return values it generates meaning we might not be testing the things we're trying to test, fail the test if it reports an error. The helper will log any errors it returns. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220510164520.768783-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- .../selftests/arm64/mte/check_tags_inclusion.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c index b906914997ce..d180ba3df990 100644 --- a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c +++ b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c @@ -49,7 +49,7 @@ static int verify_mte_pointer_validity(char *ptr, int mode) static int check_single_included_tags(int mem_type, int mode) { char *ptr; - int tag, run, result = KSFT_PASS; + int tag, run, ret, result = KSFT_PASS; ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE, @@ -57,7 +57,9 @@ static int check_single_included_tags(int mem_type, int mode) return KSFT_FAIL; for (tag = 0; (tag < MT_TAG_COUNT) && (result == KSFT_PASS); tag++) { - mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag)); + ret = mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag)); + if (ret != 0) + result = KSFT_FAIL; /* Try to catch a excluded tag by a number of tries. */ for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) { ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE); @@ -111,14 +113,16 @@ static int check_multiple_included_tags(int mem_type, int mode) static int check_all_included_tags(int mem_type, int mode) { char *ptr; - int run, result = KSFT_PASS; + int run, ret, result = KSFT_PASS; ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; - mte_switch_mode(mode, MT_INCLUDE_TAG_MASK); + ret = mte_switch_mode(mode, MT_INCLUDE_TAG_MASK); + if (ret != 0) + return KSFT_FAIL; /* Try to catch a excluded tag by a number of tries. 
*/ for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) { ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE); @@ -135,13 +139,15 @@ static int check_all_included_tags(int mem_type, int mode) static int check_none_included_tags(int mem_type, int mode) { char *ptr; - int run; + int run, ret; ptr = (char *)mte_allocate_memory(BUFFER_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; - mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK); + ret = mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK); + if (ret != 0) + return KSFT_FAIL; /* Try to catch a excluded tag by a number of tries. */ for (run = 0; run < RUNS; run++) { ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE); From 541235dee01140c4ae9e71e8dfbdb4c2f9eac9d5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:45:19 +0100 Subject: [PATCH 112/145] selftests/arm64: Remove casts to/from void in check_tags_inclusion Void pointers may be freely used with other pointer types in C, any casts between void * and other pointer types serve no purpose other than to mask potential warnings. Drop such casts from check_tags_inclusion to help with future review of the code. Signed-off-by: Mark Brown Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220510164520.768783-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- .../arm64/mte/check_tags_inclusion.c | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c index d180ba3df990..2b1425b92b69 100644 --- a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c +++ b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c @@ -23,7 +23,7 @@ static int verify_mte_pointer_validity(char *ptr, int mode) { mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE); /* Check the validity of the tagged pointer */ - memset((void *)ptr, '1', BUFFER_SIZE); + memset(ptr, '1', BUFFER_SIZE); mte_wait_after_trig(); if (cur_mte_cxt.fault_valid) { ksft_print_msg("Unexpected fault recorded for %p-%p in mode %x\n", @@ -51,7 +51,7 @@ static int check_single_included_tags(int mem_type, int mode) char *ptr; int tag, run, ret, result = KSFT_PASS; - ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); + ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; @@ -62,7 +62,7 @@ static int check_single_included_tags(int mem_type, int mode) result = KSFT_FAIL; /* Try to catch a excluded tag by a number of tries. 
*/ for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) { - ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE); + ptr = mte_insert_tags(ptr, BUFFER_SIZE); /* Check tag value */ if (MT_FETCH_TAG((uintptr_t)ptr) == tag) { ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n", @@ -74,7 +74,7 @@ static int check_single_included_tags(int mem_type, int mode) result = verify_mte_pointer_validity(ptr, mode); } } - mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); + mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); return result; } @@ -84,7 +84,7 @@ static int check_multiple_included_tags(int mem_type, int mode) int tag, run, result = KSFT_PASS; unsigned long excl_mask = 0; - ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); + ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; @@ -94,7 +94,7 @@ static int check_multiple_included_tags(int mem_type, int mode) mte_switch_mode(mode, MT_INCLUDE_VALID_TAGS(excl_mask)); /* Try to catch a excluded tag by a number of tries. */ for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) { - ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE); + ptr = mte_insert_tags(ptr, BUFFER_SIZE); /* Check tag value */ if (MT_FETCH_TAG((uintptr_t)ptr) < tag) { ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n", @@ -106,7 +106,7 @@ static int check_multiple_included_tags(int mem_type, int mode) result = verify_mte_pointer_validity(ptr, mode); } } - mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); + mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); return result; } @@ -115,7 +115,7 @@ static int check_all_included_tags(int mem_type, int mode) char *ptr; int run, ret, result = KSFT_PASS; - ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); + ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; @@ -132,7 +132,7 @@ static int check_all_included_tags(int mem_type, int mode) */ result = verify_mte_pointer_validity(ptr, mode); } - mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); + mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE); return result; } @@ -141,7 +141,7 @@ static int check_none_included_tags(int mem_type, int mode) char *ptr; int run, ret; - ptr = (char *)mte_allocate_memory(BUFFER_SIZE, mem_type, 0, false); + ptr = mte_allocate_memory(BUFFER_SIZE, mem_type, 0, false); if (check_allocated_memory(ptr, BUFFER_SIZE, mem_type, false) != KSFT_PASS) return KSFT_FAIL; @@ -159,12 +159,12 @@ static int check_none_included_tags(int mem_type, int mode) } mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE); /* Check the write validity of the untagged pointer */ - memset((void *)ptr, '1', BUFFER_SIZE); + memset(ptr, '1', BUFFER_SIZE); mte_wait_after_trig(); if (cur_mte_cxt.fault_valid) break; } - mte_free_memory((void *)ptr, BUFFER_SIZE, mem_type, false); + mte_free_memory(ptr, BUFFER_SIZE, mem_type, false); if (cur_mte_cxt.fault_valid) return KSFT_FAIL; else From 0639e02254e6863fc9c96666be45919437a6dc2e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:45:20 +0100 Subject: [PATCH 113/145] selftests/arm64: Use 
switch statements in mte_common_util.c In the MTE tests there are several places where we use chains of if statements to open code what could be written as switch statements, move over to switch statements to make the idiom clearer. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220510164520.768783-6-broonie@kernel.org Signed-off-by: Catalin Marinas --- .../selftests/arm64/mte/mte_common_util.c | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c index 6ff4c4bcbff1..00ffd34c66d3 100644 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c @@ -128,13 +128,16 @@ static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping, int prot_flag, map_flag; size_t entire_size = size + range_before + range_after; - if (mem_type != USE_MALLOC && mem_type != USE_MMAP && - mem_type != USE_MPROTECT) { + switch (mem_type) { + case USE_MALLOC: + return malloc(entire_size) + range_before; + case USE_MMAP: + case USE_MPROTECT: + break; + default: ksft_print_msg("FAIL: Invalid allocate request\n"); return NULL; } - if (mem_type == USE_MALLOC) - return malloc(entire_size) + range_before; prot_flag = PROT_READ | PROT_WRITE; if (mem_type == USE_MMAP) @@ -287,13 +290,19 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask) ksft_print_msg("FAIL: Invalid incl_mask %lx\n", incl_mask); return -EINVAL; } + en = PR_TAGGED_ADDR_ENABLE; - if (mte_option == MTE_SYNC_ERR) + switch (mte_option) { + case MTE_SYNC_ERR: en |= PR_MTE_TCF_SYNC; - else if (mte_option == MTE_ASYNC_ERR) + break; + case MTE_ASYNC_ERR: en |= PR_MTE_TCF_ASYNC; - else if (mte_option == MTE_NONE_ERR) + break; + case MTE_NONE_ERR: en |= PR_MTE_TCF_NONE; + break; + } en |= (incl_mask << PR_MTE_TAG_SHIFT); /* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */ From d7a49291d786b4400996afe3afcc3ef5eeb6f0ef Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 11 May 2022 18:21:29 +0100 Subject: [PATCH 114/145] kselftest/arm64: bti: force static linking The "bti" selftests are built with -nostdlib, which apparently automatically creates a statically linked binary, which is what we want and need for BTI (to avoid interactions with the dynamic linker). However this is not true when building a PIE binary, which some toolchains (Ubuntu) configure as the default. When compiling btitest with such a toolchain, it will create a dynamically linked binary, which will probably fail some tests, as the dynamic linker might not support BTI: =================== TAP version 13 1..18 not ok 1 nohint_func/call_using_br_x0 not ok 2 nohint_func/call_using_br_x16 not ok 3 nohint_func/call_using_blr .... =================== To make sure we create static binaries, add an explicit -static on the linker command line. This forces static linking even if the toolchain defaults to PIE builds, and fixes btitest runs on BTI enabled machines. 
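As a quick sanity check (assuming a typical toolchain environment, nothing this patch itself provides), running file on gen/btitest should now report a statically linked executable, where a PIE-defaulting toolchain previously produced a dynamically linked one.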
Signed-off-by: Andre Przywara Reviewed-by: Mark Brown Fixes: 314bcbf09f14 ("kselftest: arm64: Add BTI tests") Link: https://lore.kernel.org/r/20220511172129.2078337-1-andre.przywara@arm.com Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/bti/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/arm64/bti/Makefile b/tools/testing/selftests/arm64/bti/Makefile index 73e013c082a6..dafa1c2aa5c4 100644 --- a/tools/testing/selftests/arm64/bti/Makefile +++ b/tools/testing/selftests/arm64/bti/Makefile @@ -39,7 +39,7 @@ BTI_OBJS = \ teststubs-bti.o \ trampoline-bti.o gen/btitest: $(BTI_OBJS) - $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -o $@ $^ + $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -static -o $@ $^ NOBTI_OBJS = \ test-nobti.o \ @@ -50,7 +50,7 @@ NOBTI_OBJS = \ teststubs-nobti.o \ trampoline-nobti.o gen/nobtitest: $(NOBTI_OBJS) - $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -o $@ $^ + $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -static -o $@ $^ # Including KSFT lib.mk here will also mangle the TEST_GEN_PROGS list # to account for any OUTPUT target-dirs optionally provided by From b4d6bb38f9dc7cd8ccb117c247a41ec12578929c Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 20 Apr 2022 03:04:16 +0000 Subject: [PATCH 115/145] arm64: mte: Clean up user tag accessors Invoking user_ldst to explicitly add a post-increment of 0 is silly. Just use a normal USER() annotation and save the redundant instruction. Signed-off-by: Robin Murphy Reviewed-by: Tong Tiangen Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220420030418.3189040-6-tongtiangen@huawei.com Signed-off-by: Catalin Marinas --- arch/arm64/lib/mte.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S index 8590af3c98c0..eeb9e45bcce8 100644 --- a/arch/arm64/lib/mte.S +++ b/arch/arm64/lib/mte.S @@ -93,7 +93,7 @@ SYM_FUNC_START(mte_copy_tags_from_user) mov x3, x1 cbz x2, 2f 1: - user_ldst 2f, ldtrb, w4, x1, 0 +USER(2f, ldtrb w4, [x1]) lsl x4, x4, #MTE_TAG_SHIFT stg x4, [x0], #MTE_GRANULE_SIZE add x1, x1, #1 @@ -120,7 +120,7 @@ SYM_FUNC_START(mte_copy_tags_to_user) 1: ldg x4, [x1] ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE - user_ldst 2f, sttrb, w4, x0, 0 +USER(2f, sttrb w4, [x0]) add x0, x0, #1 add x1, x1, #MTE_GRANULE_SIZE subs x2, x2, #1 From c733812dd77350ba0da18cd6e474e5a2e6461b49 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Thu, 5 May 2022 18:32:07 +0200 Subject: [PATCH 116/145] arm64: mm: Make arch_faults_on_old_pte() check for migratability arch_faults_on_old_pte() relies on the calling context being non-preemptible. CONFIG_PREEMPT_RT turns the PTE lock into a sleepable spinlock, which doesn't disable preemption once acquired, triggering the warning in arch_faults_on_old_pte(). It does however disable migration, ensuring the task remains on the same CPU during the entirety of the critical section, making the read of cpu_has_hw_af() safe and stable. Make arch_faults_on_old_pte() check cant_migrate() instead of preemptible(). 
Cc: Valentin Schneider Suggested-by: Sebastian Andrzej Siewior Signed-off-by: Valentin Schneider Link: https://lore.kernel.org/r/20220127192437.1192957-1-valentin.schneider@arm.com Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220505163207.85751-4-bigeasy@linutronix.de Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pgtable.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 94e147e5456c..9c0a9bfd6b07 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -1001,7 +1001,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, */ static inline bool arch_faults_on_old_pte(void) { - WARN_ON(preemptible()); + /* The register read below requires a stable CPU to make any sense */ + cant_migrate(); return !cpu_has_hw_af(); } From a1259dd807192917eb98603f8bc7b43f70cea5b9 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 5 May 2022 18:32:05 +0200 Subject: [PATCH 117/145] arm64/sve: Delay freeing memory in fpsimd_flush_thread() fpsimd_flush_thread() invokes kfree() via sve_free()+sme_free() within a preempt disabled section which is not working on -RT. Delay freeing of memory until preemption is enabled again. Signed-off-by: Sebastian Andrzej Siewior Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20220505163207.85751-2-bigeasy@linutronix.de Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 64431bc62472..6e53badff8a6 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1562,6 +1562,9 @@ static void fpsimd_flush_thread_vl(enum vec_type type) void fpsimd_flush_thread(void) { + void *sve_state = NULL; + void *za_state = NULL; + if (!system_supports_fpsimd()) return; @@ -1573,18 +1576,28 @@ void fpsimd_flush_thread(void) if (system_supports_sve()) { clear_thread_flag(TIF_SVE); - sve_free(current); + + /* Defer kfree() while in atomic context */ + sve_state = current->thread.sve_state; + current->thread.sve_state = NULL; + fpsimd_flush_thread_vl(ARM64_VEC_SVE); } if (system_supports_sme()) { clear_thread_flag(TIF_SME); - sme_free(current); + + /* Defer kfree() while in atomic context */ + za_state = current->thread.za_state; + current->thread.za_state = NULL; + fpsimd_flush_thread_vl(ARM64_VEC_SME); current->thread.svcr = 0; } put_cpu_fpsimd_context(); + kfree(sve_state); + kfree(za_state); } /* From 696207d4258b2ab66dbd1655a7cfb3e978889085 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 5 May 2022 18:32:06 +0200 Subject: [PATCH 118/145] arm64/sve: Make kernel FPU protection RT friendly Non RT kernels need to protect FPU against preemption and bottom half processing. This is achieved by disabling bottom halves via local_bh_disable() which implictly disables preemption. On RT kernels this protection mechanism is not sufficient because local_bh_disable() does not disable preemption. It serializes bottom half related processing via a CPU local lock. As bottom halves are running always in thread context on RT kernels disabling preemption is the proper choice as it implicitly prevents bottom half processing. 
Signed-off-by: Sebastian Andrzej Siewior Acked-by: Mark Brown Link: https://lore.kernel.org/r/20220505163207.85751-3-bigeasy@linutronix.de Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 6e53badff8a6..a568735b7c2e 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -237,10 +237,19 @@ static void __get_cpu_fpsimd_context(void) * * The double-underscore version must only be called if you know the task * can't be preempted. + * + * On RT kernels local_bh_disable() is not sufficient because it only + * serializes soft interrupt related sections via a local lock, but stays + * preemptible. Disabling preemption is the right choice here as bottom + * half processing is always in thread context on RT kernels so it + * implicitly prevents bottom half processing as well. */ static void get_cpu_fpsimd_context(void) { - local_bh_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_disable(); + else + preempt_disable(); __get_cpu_fpsimd_context(); } @@ -261,7 +270,10 @@ static void __put_cpu_fpsimd_context(void) static void put_cpu_fpsimd_context(void) { __put_cpu_fpsimd_context(); - local_bh_enable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_enable(); + else + preempt_enable(); } static bool have_cpu_fpsimd_context(void) From 9f93c2e0cda49a558c981a57fc4a7f8d143ced93 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 May 2022 19:22:13 +0100 Subject: [PATCH 119/145] kselftest/arm64: Explicitly build no BTI tests with BTI disabled In case a distribution enables branch protection by default do as we do for the main kernel and explicitly disable branch protection when building the test case for having BTI disabled to ensure it doesn't get turned on by the toolchain defaults. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220516182213.727589-1-broonie@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/bti/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/arm64/bti/Makefile b/tools/testing/selftests/arm64/bti/Makefile index dafa1c2aa5c4..ccdac414ad94 100644 --- a/tools/testing/selftests/arm64/bti/Makefile +++ b/tools/testing/selftests/arm64/bti/Makefile @@ -10,7 +10,7 @@ PROGS := $(patsubst %,gen/%,$(TEST_GEN_PROGS)) # cases for statically linked and dynamically lined binaries are # slightly different. -CFLAGS_NOBTI = -DBTI=0 +CFLAGS_NOBTI = -mbranch-protection=none -DBTI=0 CFLAGS_BTI = -mbranch-protection=standard -DBTI=1 CFLAGS_COMMON = -ffreestanding -Wall -Wextra $(CFLAGS) From f171f9e4097d29db88a99ea96bb6c08e819a52a4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:11:57 +0100 Subject: [PATCH 120/145] arm64/fp: Make SVE and SME length register definition match architecture Currently (as of DDI0487H.a) the architecture defines the vector length control field in ZCR and SMCR as being 4 bits wide with an additional 5 bits reserved above it marked as RAZ/WI for future expansion. The kernel currently attempts to anticipate such expansion by treating these extra bits as part of the LEN field but this will be inconvenient when we start generating the defines and would cause problems in the event that the architecture goes a different direction with these fields. Let's instead change the defines to reflect the currently defined architecture, we can update in future as needed. 
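For reference, with the standard SVE/SME encoding the vector length follows from LEN as VL = (LEN + 1) * 128 bits, so a 4-bit field already covers the current architectural maximum of (0xf + 1) * 128 = 2048 bits; the old 9-bit mask merely allowed encodings beyond anything the architecture currently defines.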
No change in behaviour should be seen in any system, even emulated systems using the maximum allowed vector length for the current architecture. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220510161208.631259-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 422741ca5631..4d78b6aeebb4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1113,26 +1113,16 @@ #define DCZID_DZP_SHIFT 4 #define DCZID_BS_SHIFT 0 -/* - * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which - * are reserved by the SVE architecture for future expansion of the LEN - * field, with compatible semantics. - */ #define ZCR_ELx_LEN_SHIFT 0 -#define ZCR_ELx_LEN_SIZE 9 -#define ZCR_ELx_LEN_MASK 0x1ff +#define ZCR_ELx_LEN_SIZE 4 +#define ZCR_ELx_LEN_MASK 0xf #define SMCR_ELx_FA64_SHIFT 31 #define SMCR_ELx_FA64_MASK (1 << SMCR_ELx_FA64_SHIFT) -/* - * The SMCR_ELx_LEN_* definitions intentionally include bits [8:4] which - * are reserved by the SME architecture for future expansion of the LEN - * field, with compatible semantics. - */ #define SMCR_ELx_LEN_SHIFT 0 -#define SMCR_ELx_LEN_SIZE 9 -#define SMCR_ELx_LEN_MASK 0x1ff +#define SMCR_ELx_LEN_SIZE 4 +#define SMCR_ELx_LEN_MASK 0xf #define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */ #define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */ From 5b06dcfd9e0a5dd63ecadf9169ee92a80b063322 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:11:58 +0100 Subject: [PATCH 121/145] arm64/fp: Rename SVE and SME LEN field name to _WIDTH The SVE and SVE length configuration field LEN have constants specifying their width called _SIZE rather than the more normal _WIDTH, in preparation for automatic generation rename to _WIDTH. No functional change. 
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220510161208.631259-3-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 4 ++-- arch/arm64/kernel/cpufeature.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 4d78b6aeebb4..b83808ebc58f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1114,14 +1114,14 @@ #define DCZID_BS_SHIFT 0 #define ZCR_ELx_LEN_SHIFT 0 -#define ZCR_ELx_LEN_SIZE 4 +#define ZCR_ELx_LEN_WIDTH 4 #define ZCR_ELx_LEN_MASK 0xf #define SMCR_ELx_FA64_SHIFT 31 #define SMCR_ELx_FA64_MASK (1 << SMCR_ELx_FA64_SHIFT) #define SMCR_ELx_LEN_SHIFT 0 -#define SMCR_ELx_LEN_SIZE 4 +#define SMCR_ELx_LEN_WIDTH 4 #define SMCR_ELx_LEN_MASK 0xf #define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 08689362cd89..665ad380c07f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -577,13 +577,13 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = { static const struct arm64_ftr_bits ftr_zcr[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, - ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */ + ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_WIDTH, 0), /* LEN */ ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_smcr[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, - SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_SIZE, 0), /* LEN */ + SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_WIDTH, 0), /* LEN */ ARM64_FTR_END, }; From a6dab6cc0f4cd0b341f002ce7d0683701612f527 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:11:59 +0100 Subject: [PATCH 122/145] arm64/sme: Drop SYS_ from SMIDR_EL1 defines We currently have a non-standard SYS_ prefix in the constants generated for SMIDR_EL1 bitfields. Drop this in preparation for automatic register definition generation, no functional change. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220510161208.631259-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/el2_setup.h | 2 +- arch/arm64/include/asm/sysreg.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index fabdbde0fe02..34ceff08cac4 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -171,7 +171,7 @@ msr_s SYS_SMCR_EL2, x1 // length for EL1. mrs_s x1, SYS_SMIDR_EL1 // Priority mapping supported? 
- ubfx x1, x1, #SYS_SMIDR_EL1_SMPS_SHIFT, #1 + ubfx x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1 cbz x1, .Lskip_sme_\@ msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b83808ebc58f..ab2d7cbc63fc 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -467,9 +467,9 @@ #define SYS_SMIDR_EL1 sys_reg(3, 1, 0, 0, 6) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) -#define SYS_SMIDR_EL1_IMPLEMENTER_SHIFT 24 -#define SYS_SMIDR_EL1_SMPS_SHIFT 15 -#define SYS_SMIDR_EL1_AFFINITY_SHIFT 0 +#define SMIDR_EL1_IMPLEMENTER_SHIFT 24 +#define SMIDR_EL1_SMPS_SHIFT 15 +#define SMIDR_EL1_AFFINITY_SHIFT 0 #define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0) From e65fc01bf271cefa6269b7ab1badcf7cddae5d40 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:12:00 +0100 Subject: [PATCH 123/145] arm64/sme: Standardise bitfield names for SVCR The bitfield definitions for SVCR have a SYS_ added to the names of the constant which will be a problem for automatic generation. Remove the prefixes, no functional change. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220510161208.631259-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 4 ++-- arch/arm64/include/asm/processor.h | 2 +- arch/arm64/include/asm/sysreg.h | 4 ++-- arch/arm64/kernel/fpsimd.c | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 75caa2098d5b..aa11dbec0d70 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -67,12 +67,12 @@ extern void fpsimd_save_and_flush_cpu_state(void); static inline bool thread_sm_enabled(struct thread_struct *thread) { - return system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_SM_MASK); + return system_supports_sme() && (thread->svcr & SVCR_EL0_SM_MASK); } static inline bool thread_za_enabled(struct thread_struct *thread) { - return system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_ZA_MASK); + return system_supports_sme() && (thread->svcr & SVCR_EL0_ZA_MASK); } /* Maximum VL that SVE/SME VL-agnostic software can transparently support */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 1d2ca4870b84..69ce163d2fb2 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -192,7 +192,7 @@ static inline unsigned int thread_get_sme_vl(struct thread_struct *thread) static inline unsigned int thread_get_cur_vl(struct thread_struct *thread) { - if (system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_SM_MASK)) + if (system_supports_sme() && (thread->svcr & SVCR_EL0_SM_MASK)) return thread_get_sme_vl(thread); else return thread_get_sve_vl(thread); diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index ab2d7cbc63fc..4459cd4a37f5 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -480,8 +480,8 @@ #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) #define SYS_SVCR_EL0 sys_reg(3, 3, 4, 2, 2) -#define SYS_SVCR_EL0_ZA_MASK 2 -#define SYS_SVCR_EL0_SM_MASK 1 +#define SVCR_EL0_ZA_MASK 2 +#define SVCR_EL0_SM_MASK 1 #define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0) #define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index a568735b7c2e..a5f6d6d9f372 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1918,7 +1918,7 @@ void 
__efi_fpsimd_begin(void) svcr = read_sysreg_s(SYS_SVCR_EL0); if (!system_supports_fa64()) - ffr = svcr & SYS_SVCR_EL0_SM_MASK; + ffr = svcr & SVCR_EL0_SM_MASK; __this_cpu_write(efi_sm_state, ffr); } @@ -1929,7 +1929,7 @@ void __efi_fpsimd_begin(void) if (system_supports_sme()) sysreg_clear_set_s(SYS_SVCR_EL0, - SYS_SVCR_EL0_SM_MASK, 0); + SVCR_EL0_SM_MASK, 0); } else { fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state)); @@ -1964,7 +1964,7 @@ void __efi_fpsimd_end(void) if (__this_cpu_read(efi_sm_state)) { sysreg_clear_set_s(SYS_SVCR_EL0, 0, - SYS_SVCR_EL0_SM_MASK); + SVCR_EL0_SM_MASK); if (!system_supports_fa64()) ffr = efi_sm_state; } From ec0067a63e5a37de74025d46095cfe7a7af3114a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:12:01 +0100 Subject: [PATCH 124/145] arm64/sme: Remove _EL0 from name of SVCR - FIXME sysreg.h The defines for SVCR call it SVCR_EL0 however the architecture calls the register SVCR with no _EL0 suffix. In preparation for generating the sysreg definitions rename to match the architecture, no functional change. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220510161208.631259-6-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 4 ++-- arch/arm64/include/asm/processor.h | 2 +- arch/arm64/include/asm/sysreg.h | 6 +++--- arch/arm64/kernel/fpsimd.c | 26 +++++++++++++------------- arch/arm64/kernel/ptrace.c | 8 ++++---- arch/arm64/kernel/signal.c | 14 +++++++------- arch/arm64/kernel/syscall.c | 4 ++-- arch/arm64/kvm/fpsimd.c | 4 ++-- arch/arm64/kvm/sys_regs.c | 2 +- 9 files changed, 35 insertions(+), 35 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index aa11dbec0d70..9bb1873f5295 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -67,12 +67,12 @@ extern void fpsimd_save_and_flush_cpu_state(void); static inline bool thread_sm_enabled(struct thread_struct *thread) { - return system_supports_sme() && (thread->svcr & SVCR_EL0_SM_MASK); + return system_supports_sme() && (thread->svcr & SVCR_SM_MASK); } static inline bool thread_za_enabled(struct thread_struct *thread) { - return system_supports_sme() && (thread->svcr & SVCR_EL0_ZA_MASK); + return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK); } /* Maximum VL that SVE/SME VL-agnostic software can transparently support */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 69ce163d2fb2..8de5a4fc06e3 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -192,7 +192,7 @@ static inline unsigned int thread_get_sme_vl(struct thread_struct *thread) static inline unsigned int thread_get_cur_vl(struct thread_struct *thread) { - if (system_supports_sme() && (thread->svcr & SVCR_EL0_SM_MASK)) + if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK)) return thread_get_sme_vl(thread); else return thread_get_sve_vl(thread); diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 4459cd4a37f5..a2f0759f65b2 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -479,9 +479,9 @@ #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) -#define SYS_SVCR_EL0 sys_reg(3, 3, 4, 2, 2) -#define SVCR_EL0_ZA_MASK 2 -#define SVCR_EL0_SM_MASK 1 +#define SYS_SVCR sys_reg(3, 3, 4, 2, 2) +#define SVCR_ZA_MASK 2 +#define SVCR_SM_MASK 1 #define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0) #define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 
1) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index a5f6d6d9f372..759d40cac1fe 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -410,7 +410,7 @@ static void task_fpsimd_load(void) if (test_thread_flag(TIF_SME)) sme_set_vq(sve_vq_from_vl(sme_vl) - 1); - write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0); + write_sysreg_s(current->thread.svcr, SYS_SVCR); if (thread_za_enabled(¤t->thread)) za_load_state(current->thread.za_state); @@ -462,15 +462,15 @@ static void fpsimd_save(void) if (system_supports_sme()) { u64 *svcr = last->svcr; - *svcr = read_sysreg_s(SYS_SVCR_EL0); + *svcr = read_sysreg_s(SYS_SVCR); - *svcr = read_sysreg_s(SYS_SVCR_EL0); + *svcr = read_sysreg_s(SYS_SVCR); - if (*svcr & SYS_SVCR_EL0_ZA_MASK) + if (*svcr & SVCR_ZA_MASK) za_save_state(last->za_state); /* If we are in streaming mode override regular SVE. */ - if (*svcr & SYS_SVCR_EL0_SM_MASK) { + if (*svcr & SVCR_SM_MASK) { save_sve_regs = true; save_ffr = system_supports_fa64(); vl = last->sme_vl; @@ -852,8 +852,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type, sve_to_fpsimd(task); if (system_supports_sme() && type == ARM64_VEC_SME) { - task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK | - SYS_SVCR_EL0_ZA_MASK); + task->thread.svcr &= ~(SVCR_SM_MASK | + SVCR_ZA_MASK); clear_thread_flag(TIF_SME); } @@ -1915,10 +1915,10 @@ void __efi_fpsimd_begin(void) __this_cpu_write(efi_sve_state_used, true); if (system_supports_sme()) { - svcr = read_sysreg_s(SYS_SVCR_EL0); + svcr = read_sysreg_s(SYS_SVCR); if (!system_supports_fa64()) - ffr = svcr & SVCR_EL0_SM_MASK; + ffr = svcr & SVCR_SM_MASK; __this_cpu_write(efi_sm_state, ffr); } @@ -1928,8 +1928,8 @@ void __efi_fpsimd_begin(void) ffr); if (system_supports_sme()) - sysreg_clear_set_s(SYS_SVCR_EL0, - SVCR_EL0_SM_MASK, 0); + sysreg_clear_set_s(SYS_SVCR, + SVCR_SM_MASK, 0); } else { fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state)); @@ -1962,9 +1962,9 @@ void __efi_fpsimd_end(void) */ if (system_supports_sme()) { if (__this_cpu_read(efi_sm_state)) { - sysreg_clear_set_s(SYS_SVCR_EL0, + sysreg_clear_set_s(SYS_SVCR, 0, - SVCR_EL0_SM_MASK); + SVCR_SM_MASK); if (!system_supports_fa64()) ffr = efi_sm_state; } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 60ebc3060cf1..21da83187a60 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -867,10 +867,10 @@ static int sve_set_common(struct task_struct *target, switch (type) { case ARM64_VEC_SVE: - target->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK; + target->thread.svcr &= ~SVCR_SM_MASK; break; case ARM64_VEC_SME: - target->thread.svcr |= SYS_SVCR_EL0_SM_MASK; + target->thread.svcr |= SVCR_SM_MASK; break; default: WARN_ON_ONCE(1); @@ -1100,7 +1100,7 @@ static int za_set(struct task_struct *target, /* If there is no data then disable ZA */ if (!count) { - target->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK; + target->thread.svcr &= ~SVCR_ZA_MASK; goto out; } @@ -1125,7 +1125,7 @@ static int za_set(struct task_struct *target, /* Mark ZA as active and let userspace use it */ set_tsk_thread_flag(target, TIF_SME); - target->thread.svcr |= SYS_SVCR_EL0_ZA_MASK; + target->thread.svcr |= SVCR_ZA_MASK; out: fpsimd_flush_task_state(target); diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 2295948d97fd..18bf590dc1c7 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -288,7 +288,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) if (sve.head.size <= sizeof(*user->sve)) { 
clear_thread_flag(TIF_SVE); - current->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK; + current->thread.svcr &= ~SVCR_SM_MASK; goto fpsimd_only; } @@ -321,7 +321,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) return -EFAULT; if (sve.flags & SVE_SIG_FLAG_SM) - current->thread.svcr |= SYS_SVCR_EL0_SM_MASK; + current->thread.svcr |= SVCR_SM_MASK; else set_thread_flag(TIF_SVE); @@ -398,7 +398,7 @@ static int restore_za_context(struct user_ctxs __user *user) return -EINVAL; if (za.head.size <= sizeof(*user->za)) { - current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK; + current->thread.svcr &= ~SVCR_ZA_MASK; return 0; } @@ -419,7 +419,7 @@ static int restore_za_context(struct user_ctxs __user *user) sme_alloc(current); if (!current->thread.za_state) { - current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK; + current->thread.svcr &= ~SVCR_ZA_MASK; clear_thread_flag(TIF_SME); return -ENOMEM; } @@ -432,7 +432,7 @@ static int restore_za_context(struct user_ctxs __user *user) return -EFAULT; set_thread_flag(TIF_SME); - current->thread.svcr |= SYS_SVCR_EL0_ZA_MASK; + current->thread.svcr |= SVCR_ZA_MASK; return 0; } @@ -922,8 +922,8 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, /* Signal handlers are invoked with ZA and streaming mode disabled */ if (system_supports_sme()) { - current->thread.svcr &= ~(SYS_SVCR_EL0_ZA_MASK | - SYS_SVCR_EL0_SM_MASK); + current->thread.svcr &= ~(SVCR_ZA_MASK | + SVCR_SM_MASK); sme_smstop(); } diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 92c69e5ac269..733451fe7e41 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -174,9 +174,9 @@ static inline void fp_user_discard(void) * need updating. */ if (system_supports_sme() && test_thread_flag(TIF_SME)) { - u64 svcr = read_sysreg_s(SYS_SVCR_EL0); + u64 svcr = read_sysreg_s(SYS_SVCR); - if (svcr & SYS_SVCR_EL0_SM_MASK) + if (svcr & SVCR_SM_MASK) sme_smstop_sm(); } diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 441edb9c398c..3d251a4d2cf7 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -96,8 +96,8 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN) vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED; - if (read_sysreg_s(SYS_SVCR_EL0) & - (SYS_SVCR_EL0_SM_MASK | SYS_SVCR_EL0_ZA_MASK)) { + if (read_sysreg_s(SYS_SVCR) & + (SVCR_SM_MASK | SVCR_ZA_MASK)) { vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; fpsimd_save_and_flush_cpu_state(); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 689e53dd4cb1..1cf01c022b30 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1685,7 +1685,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SMIDR_EL1), undef_access }, { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 }, { SYS_DESC(SYS_CTR_EL0), access_ctr }, - { SYS_DESC(SYS_SVCR_EL0), undef_access }, + { SYS_DESC(SYS_SVCR), undef_access }, { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, .reg = PMCR_EL0 }, From 9e2c0819ac853d94c927d5d2f59e2ca2b48500b4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:12:02 +0100 Subject: [PATCH 125/145] arm64/sysreg: Support generation of RAZ fields Add a statement for RAZ bitfields to the automatic register generation script. Nothing is emitted to the header for these fields. 
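For illustration, a hypothetical register description using the new statement could read (EXAMPLE_ELx is a made-up name, not something added by this series):

SysregFields EXAMPLE_ELx
Res0 63:9
Raz 8:4
Field 3:0 LEN
EndSysregFields

The Raz run still has to be declared so that all 64 bits remain accounted for by the script, but no SHIFT/MASK defines are emitted for it.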
Signed-off-by: Mark Brown Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220510161208.631259-7-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/gen-sysreg.awk | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk index 4aa7ff8ce707..89bfb74e28de 100755 --- a/arch/arm64/tools/gen-sysreg.awk +++ b/arch/arm64/tools/gen-sysreg.awk @@ -226,6 +226,13 @@ END { next } +/^Raz/ && (block == "Sysreg" || block == "SysregFields") { + expect_fields(2) + parse_bitdef(reg, field, $2) + + next +} + /^Enum/ { change_block("Enum", "Sysreg", "Enum") expect_fields(3) From 0d1322e7ea755b9de4819aa246ebab924b4cefec Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:12:03 +0100 Subject: [PATCH 126/145] arm64/sme: Automatically generate defines for SMCR Convert SMCR to use the register definition code, no functional change. Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220510161208.631259-8-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 10 ---------- arch/arm64/tools/sysreg | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index a2f0759f65b2..cbf03a1f316e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -216,7 +216,6 @@ #define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) #define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) #define SYS_SMPRI_EL1 sys_reg(3, 0, 1, 2, 4) -#define SYS_SMCR_EL1 sys_reg(3, 0, 1, 2, 6) #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) @@ -571,7 +570,6 @@ #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2) #define SYS_SMPRIMAP_EL2 sys_reg(3, 4, 1, 2, 5) -#define SYS_SMCR_EL2 sys_reg(3, 4, 1, 2, 6) #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) #define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) @@ -631,7 +629,6 @@ #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) #define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) #define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) -#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) @@ -1117,13 +1114,6 @@ #define ZCR_ELx_LEN_WIDTH 4 #define ZCR_ELx_LEN_MASK 0xf -#define SMCR_ELx_FA64_SHIFT 31 -#define SMCR_ELx_FA64_MASK (1 << SMCR_ELx_FA64_SHIFT) - -#define SMCR_ELx_LEN_SHIFT 0 -#define SMCR_ELx_LEN_WIDTH 4 -#define SMCR_ELx_LEN_MASK 0xf - #define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */ #define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */ diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index c5619629bf9c..d0ac57648000 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -185,6 +185,26 @@ Field 1 A Field 0 M EndSysreg +SysregFields SMCR_ELx +Res0 63:32 +Field 31 FA64 +Res0 30:9 +Raz 8:4 +Field 3:0 LEN +EndSysregFields + +Sysreg SMCR_EL1 3 0 1 2 6 +Fields SMCR_ELx +EndSysreg + +Sysreg SMCR_EL2 3 4 1 2 6 +Fields SMCR_ELx +EndSysreg + +Sysreg SMCR_EL12 3 5 1 2 6 +Fields SMCR_ELx +EndSysreg + SysregFields TTBRx_EL1 Field 63:48 ASID Field 47:1 BADDR From c37b8700b7234c91e38c3a6c8dcddb6bffdfb218 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:12:04 +0100 Subject: [PATCH 127/145] arm64/sme: Automatically generate SMIDR_EL1 defines Automatically generate the defines for 
SMIDR_EL1, no functional change. Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220510161208.631259-9-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/tools/sysreg | 9 +++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index cbf03a1f316e..ce08a42637bc 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -463,7 +463,6 @@ #define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) #define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) #define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4) -#define SYS_SMIDR_EL1 sys_reg(3, 1, 0, 0, 6) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) #define SMIDR_EL1_IMPLEMENTER_SHIFT 24 diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index d0ac57648000..1bf88ca3da5b 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -197,6 +197,15 @@ Sysreg SMCR_EL1 3 0 1 2 6 Fields SMCR_ELx EndSysreg +Sysreg SMIDR_EL1 3 1 0 0 6 +Res0 63:32 +Field 31:24 IMPLEMENTER +Field 23:16 REVISION +Field 15 SMPS +Res0 14:12 +Field 11:0 AFFINITY +EndSysreg + Sysreg SMCR_EL2 3 4 1 2 6 Fields SMCR_ELx EndSysreg From 8e053810e6ce90bd45f97370708b7803f6957651 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:12:05 +0100 Subject: [PATCH 128/145] arm64/sme: Automatically generate SMPRIMAP_EL2 definitions No functional change should be seen from converting SMPRIMAP_EL2 to be generated. Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220510161208.631259-10-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/tools/sysreg | 19 +++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index ce08a42637bc..2a9468d449fa 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -568,7 +568,6 @@ #define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2) -#define SYS_SMPRIMAP_EL2 sys_reg(3, 4, 1, 2, 5) #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) #define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 1bf88ca3da5b..2cdcdac0465e 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -206,6 +206,25 @@ Res0 14:12 Field 11:0 AFFINITY EndSysreg +Sysreg SMPRIMAP_EL2 3 4 1 2 5 +Field 63:60 P15 +Field 59:56 P14 +Field 55:52 P13 +Field 51:48 P12 +Field 47:44 P11 +Field 43:40 P10 +Field 39:36 F9 +Field 35:32 P8 +Field 31:28 P7 +Field 27:24 P6 +Field 23:20 P5 +Field 19:16 P4 +Field 15:12 P3 +Field 11:8 P2 +Field 7:4 P1 +Field 3:0 P0 +EndSysreg + Sysreg SMCR_EL2 3 4 1 2 6 Fields SMCR_ELx EndSysreg From 9321f0492b89c8d8286d8c5e06f45c984a8221a4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:12:06 +0100 Subject: [PATCH 129/145] arm64/sme: Generate SMPRI_EL1 definitions Convert SMPRI_EL1 to be generated. No functional change. 
Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220510161208.631259-11-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 3 --- arch/arm64/tools/sysreg | 5 +++++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 2a9468d449fa..b4affc3fd569 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -215,7 +215,6 @@ #define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) #define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) -#define SYS_SMPRI_EL1 sys_reg(3, 0, 1, 2, 4) #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) @@ -406,8 +405,6 @@ #define TRBIDR_ALIGN_MASK GENMASK(3, 0) #define TRBIDR_ALIGN_SHIFT 0 -#define SMPRI_EL1_PRIORITY_MASK 0xf - #define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) #define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 2cdcdac0465e..d29bc429f504 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -185,6 +185,11 @@ Field 1 A Field 0 M EndSysreg +Sysreg SMPRI_EL1 3 0 1 2 4 +Res0 63:4 +Field 3:0 PRIORITY +EndSysreg + SysregFields SMCR_ELx Res0 63:32 Field 31 FA64 From 11e12a91c118780b76ecae3610efd49b7ff7d39e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:12:07 +0100 Subject: [PATCH 130/145] arm64/sme: Generate definitions for SVCR Convert SVCR to automatic generation, no functional change. Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220510161208.631259-12-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 4 ---- arch/arm64/tools/sysreg | 6 ++++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b4affc3fd569..804b5326c393 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -474,10 +474,6 @@ #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) -#define SYS_SVCR sys_reg(3, 3, 4, 2, 2) -#define SVCR_ZA_MASK 2 -#define SVCR_SM_MASK 1 - #define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0) #define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1) #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index d29bc429f504..7888603db50a 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -211,6 +211,12 @@ Res0 14:12 Field 11:0 AFFINITY EndSysreg +Sysreg SVCR 3 3 4 2 2 +Res0 63:2 +Field 1 ZA +Field 0 SM +EndSysreg + Sysreg SMPRIMAP_EL2 3 4 1 2 5 Field 63:60 P15 Field 59:56 P14 From 89e9fb327421081166c1d1682b6601ac93dd610c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 10 May 2022 17:12:08 +0100 Subject: [PATCH 131/145] arm64/sve: Generate ZCR definitions Convert the various ZCR instances to automatic generation, no functional changes expected.
Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220510161208.631259-13-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 7 ------- arch/arm64/tools/sysreg | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 804b5326c393..91e4f8601393 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -213,7 +213,6 @@ #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) -#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) #define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) @@ -558,7 +557,6 @@ #define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4) #define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5) #define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6) -#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2) #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) @@ -619,7 +617,6 @@ /* VHE encodings for architectural EL0/1 system registers */ #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) #define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) -#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) @@ -1101,10 +1098,6 @@ #define DCZID_DZP_SHIFT 4 #define DCZID_BS_SHIFT 0 -#define ZCR_ELx_LEN_SHIFT 0 -#define ZCR_ELx_LEN_WIDTH 4 -#define ZCR_ELx_LEN_MASK 0xf - #define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */ #define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */ diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 7888603db50a..a236d7a821b4 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -190,6 +190,16 @@ Res0 63:4 Field 3:0 PRIORITY EndSysreg +SysregFields ZCR_ELx +Res0 63:9 +Raz 8:4 +Field 3:0 LEN +EndSysregFields + +Sysreg ZCR_EL1 3 0 1 2 0 +Fields ZCR_ELx +EndSysreg + SysregFields SMCR_ELx Res0 63:32 Field 31 FA64 @@ -217,6 +227,10 @@ Field 1 ZA Field 0 SM EndSysreg +Sysreg ZCR_EL2 3 4 1 2 0 +Fields ZCR_ELx +EndSysreg + Sysreg SMPRIMAP_EL2 3 4 1 2 5 Field 63:60 P15 Field 59:56 P14 @@ -240,6 +254,10 @@ Sysreg SMCR_EL2 3 4 1 2 6 Fields SMCR_ELx EndSysreg +Sysreg ZCR_EL12 3 5 1 2 0 +Fields ZCR_ELx +EndSysreg + Sysreg SMCR_EL12 3 5 1 2 6 Fields SMCR_ELx EndSysreg From 8f0f104e2ab6eed4cad3b111dc206f843bda43ea Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Wed, 11 May 2022 11:20:32 +0800 Subject: [PATCH 132/145] arm64: kdump: Do not allocate crash low memory if not needed When "crashkernel=X,high" is specified, the specified "crashkernel=Y,low" memory is not required in the following corner cases: 1. If both CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 are disabled, it means that the devices can access any memory. 2. If the system memory is small, the crash high memory may be allocated from the DMA zones. If that happens, there's no need to allocate another crash low memory because there's already one. Add condition '(crash_base >= CRASH_ADDR_LOW_MAX)' to determine whether the 'high' memory is allocated above DMA zones. Note: when both CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 are disabled, the entire physical memory is DMA accessible, CRASH_ADDR_LOW_MAX equals 'PHYS_MASK + 1'. 
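As a purely illustrative example (sizes made up), a machine booted with

crashkernel=1G,high crashkernel=256M,low

whose 1G 'high' reservation happens to be satisfied below CRASH_ADDR_LOW_MAX will, with this change, skip the extra 256M 'low' reservation, since the memory already reserved is DMA-addressable.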
Signed-off-by: Zhen Lei Acked-by: Baoquan He Link: https://lore.kernel.org/r/20220511032033.426-1-thunder.leizhen@huawei.com Signed-off-by: Catalin Marinas --- Documentation/admin-guide/kernel-parameters.txt | 5 +++-- arch/arm64/mm/init.c | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index f6ff55840751..1b543c3109f4 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -823,7 +823,7 @@ low memory is needed to make sure DMA buffers for 32-bit devices won't run out. Kernel would try to allocate at least 256M below 4G automatically. - This one let user to specify own low range under 4G + This one lets the user specify own low range under 4G for second kernel instead. 0: to disable low allocation. It will be ignored when crashkernel=X,high is not used @@ -832,7 +832,8 @@ [KNL, ARM64] range in low memory. This one lets the user specify a low range in the DMA zone for the crash dump kernel. - It will be ignored when crashkernel=X,high is not used. + It will be ignored when crashkernel=X,high is not used + or memory reserved is located in the DMA zones. cryptomgr.notests [KNL] Disable crypto self-tests diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 18ba66c90991..ac510fb6a2c0 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -170,7 +170,8 @@ static void __init reserve_crashkernel(void) return; } - if (crash_low_size && reserve_crashkernel_low(crash_low_size)) { + if ((crash_base >= CRASH_ADDR_LOW_MAX) && + crash_low_size && reserve_crashkernel_low(crash_low_size)) { memblock_phys_free(crash_base, crash_size); return; } From f0d9d79ec793ec66271e80ff2f9bf7a10458a584 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 16 May 2022 08:55:57 +0800 Subject: [PATCH 133/145] arm64/hugetlb: Use ptep_get() to get the pte value of a huge page The original huge_ptep_get() on ARM64 is just a wrapper of ptep_get(), which will not take into account any contig-PTEs dirty and access bits. Meanwhile we will implement a new ARM64-specific huge_ptep_get() interface in following patch, which will take into account any contig-PTEs dirty and access bits. To keep the same efficient logic to get the pte value, change to use ptep_get() as a preparation. 
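For reference, the generic fallback that arm64 currently picks up is (roughly) just a pass-through:

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return ptep_get(ptep);
}

so converting the internal callers to ptep_get() keeps the current behaviour while freeing huge_ptep_get() to become the contig-aware arm64 helper in the next patch.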
Signed-off-by: Baolin Wang Reviewed-by: Muchun Song Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/5113ed6e103f995e1d0f0c9fda0373b761bbcad2.1652496622.git.baolin.wang@linux.alibaba.com Signed-off-by: Catalin Marinas --- arch/arm64/mm/hugetlbpage.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 749435b01a89..2aa1b0e176a2 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -172,7 +172,7 @@ static pte_t get_clear_contig(struct mm_struct *mm, unsigned long pgsize, unsigned long ncontig) { - pte_t orig_pte = huge_ptep_get(ptep); + pte_t orig_pte = ptep_get(ptep); unsigned long i; for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { @@ -379,7 +379,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, { int ncontig; size_t pgsize; - pte_t orig_pte = huge_ptep_get(ptep); + pte_t orig_pte = ptep_get(ptep); if (!pte_cont(orig_pte)) return ptep_get_and_clear(mm, addr, ptep); @@ -402,11 +402,11 @@ static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig) { int i; - if (pte_write(pte) != pte_write(huge_ptep_get(ptep))) + if (pte_write(pte) != pte_write(ptep_get(ptep))) return 1; for (i = 0; i < ncontig; i++) { - pte_t orig_pte = huge_ptep_get(ptep + i); + pte_t orig_pte = ptep_get(ptep + i); if (pte_dirty(pte) != pte_dirty(orig_pte)) return 1; From bc5dfb4fd7bd471c77ea48143159eb5e1308d636 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 16 May 2022 08:55:58 +0800 Subject: [PATCH 134/145] arm64/hugetlb: Implement arm64 specific huge_ptep_get() Now we use huge_ptep_get() to get the pte value of a hugetlb page, however it will only return one specific pte value for the CONT-PTE or CONT-PMD size hugetlb on ARM64 system, which can contain several continuous pte or pmd entries with same page table attributes. And it will not take into account the subpages' dirty or young bits of a CONT-PTE/PMD size hugetlb page. So the huge_ptep_get() is inconsistent with huge_ptep_get_and_clear(), which already takes account the dirty or young bits for any subpages in this CONT-PTE/PMD size hugetlb [1]. Meanwhile we can miss dirty or young flags statistics for hugetlb pages with current huge_ptep_get(), such as the gather_hugetlb_stats() function, and CONT-PTE/PMD hugetlb monitoring with DAMON. Thus define an ARM64 specific huge_ptep_get() implementation as well as enabling __HAVE_ARCH_HUGE_PTEP_GET, that will take into account any subpages' dirty or young bits for CONT-PTE/PMD size hugetlb page, for those functions that want to check the dirty and young flags of a hugetlb page. 
[1] https://lore.kernel.org/linux-mm/85bd80b4-b4fd-0d3f-a2e5-149559f2f387@oracle.com/ Suggested-by: Muchun Song Signed-off-by: Baolin Wang Reviewed-by: Muchun Song Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/624109a80ac4bbdf1e462dfa0b49e9f7c31a7c0d.1652496622.git.baolin.wang@linux.alibaba.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hugetlb.h | 2 ++ arch/arm64/mm/hugetlbpage.c | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index 1242f71937f8..d656822b13f1 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -44,6 +44,8 @@ extern void huge_ptep_clear_flush(struct vm_area_struct *vma, #define __HAVE_ARCH_HUGE_PTE_CLEAR extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz); +#define __HAVE_ARCH_HUGE_PTEP_GET +extern pte_t huge_ptep_get(pte_t *ptep); extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned long sz); #define set_huge_swap_pte_at set_huge_swap_pte_at diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 2aa1b0e176a2..64bb078e2e7b 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -158,6 +158,28 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize) return contig_ptes; } +pte_t huge_ptep_get(pte_t *ptep) +{ + int ncontig, i; + size_t pgsize; + pte_t orig_pte = ptep_get(ptep); + + if (!pte_present(orig_pte) || !pte_cont(orig_pte)) + return orig_pte; + + ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize); + for (i = 0; i < ncontig; i++, ptep++) { + pte_t pte = ptep_get(ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + return orig_pte; +} + /* * Changing some bits of contiguous entries requires us to follow a * Break-Before-Make approach, breaking the whole contiguous set From 6ee3cf6a209fc76d8ae51fba357a62841ec6124c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 29 Apr 2022 15:13:46 +0200 Subject: [PATCH 135/145] arm64: lds: move special code sections out of kernel exec segment There are a few code sections that are emitted into the kernel's executable .text segment simply because they contain code, but are actually never executed via this mapping, so they can happily live in a region that gets mapped without executable permissions, reducing the risk of being gadgetized. Note that the kexec and hibernate region contents are always copied into a fresh page, and so there is no need to align them as long as the overall size of each is below 4 KiB. Signed-off-by: Ard Biesheuvel Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220429131347.3621090-2-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/vmlinux.lds.S | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index edaf0faf766f..2d4a8f995175 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -93,7 +93,6 @@ jiffies = jiffies_64; #ifdef CONFIG_HIBERNATION #define HIBERNATE_TEXT \ - . = ALIGN(SZ_4K); \ __hibernate_exit_text_start = .; \ *(.hibernate_exit.text) \ __hibernate_exit_text_end = .; @@ -103,7 +102,6 @@ jiffies = jiffies_64; #ifdef CONFIG_KEXEC_CORE #define KEXEC_TEXT \ - . 
= ALIGN(SZ_4K); \ __relocate_new_kernel_start = .; \ *(.kexec_relocate.text) \ __relocate_new_kernel_end = .; @@ -170,9 +168,6 @@ SECTIONS KPROBES_TEXT HYPERVISOR_TEXT IDMAP_TEXT - HIBERNATE_TEXT - KEXEC_TEXT - TRAMP_TEXT *(.gnu.warning) . = ALIGN(16); *(.got) /* Global offset table */ @@ -194,6 +189,14 @@ SECTIONS HYPERVISOR_DATA_SECTIONS + /* code sections that are never executed via the kernel mapping */ + .rodata.text : { + TRAMP_TEXT + HIBERNATE_TEXT + KEXEC_TEXT + . = ALIGN(PAGE_SIZE); + } + idmap_pg_dir = .; . += IDMAP_DIR_SIZE; idmap_pg_end = .; @@ -337,8 +340,8 @@ ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE, ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, "ID map text too big or misaligned") #ifdef CONFIG_HIBERNATION -ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) - <= SZ_4K, "Hibernate exit text too big or misaligned") +ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K, + "Hibernate exit text is bigger than 4 KiB") #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, @@ -362,7 +365,7 @@ ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET, #ifdef CONFIG_KEXEC_CORE /* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */ -ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1)) - <= SZ_4K, "kexec relocation code is too big or misaligned") +ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K, + "kexec relocation code is bigger than 4 KiB") ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken") #endif From 01142791b0d11f20becccd0b30ed5e8fbb3822b6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 29 Apr 2022 15:13:47 +0200 Subject: [PATCH 136/145] arm64: mm: avoid writable executable mappings in kexec/hibernate code The temporary mappings of the low-level kexec and hibernate helpers are created with both writable and executable attributes, which is not necessary here, and generally best avoided. So use read-only, executable attributes instead. Signed-off-by: Ard Biesheuvel Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220429131347.3621090-3-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/mm/trans_pgd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c index d7da8ca40d2e..4ea2eefbc053 100644 --- a/arch/arm64/mm/trans_pgd.c +++ b/arch/arm64/mm/trans_pgd.c @@ -238,7 +238,7 @@ int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0, int this_level, index, level_lsb, level_msb; dst_addr &= PAGE_MASK; - prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_EXEC)); + prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX)); for (this_level = 3; this_level >= 0; this_level--) { levels[this_level] = trans_alloc(info); From 3cb7e662a9309e1d54d3d3aba530616a20ea9a10 Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Tue, 17 May 2022 16:16:47 +0200 Subject: [PATCH 137/145] arm64: Kconfig: Fix indentation and add comments The convention for indentation seems to be a single tab. Help text is further indented by an additional two whitespaces. Fix the lines that violate these rules. While at it, add trailing comments to endif and endmenu statements for better readability.
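A minimal sketch of the intended layout (EXAMPLE_FEATURE is a hypothetical symbol; the leading whitespace below stands in for a single tab, or a tab plus two spaces for help text):

config EXAMPLE_FEATURE
        bool "Enable the example feature"
        default y
        help
          Help text sits one level deeper than the option attributes.

endmenu # "Example menu"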
Signed-off-by: Juerg Haefliger Link: https://lore.kernel.org/r/20220517141648.331976-2-juergh@canonical.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 95 +++++++++++++++++++++++----------------------- 1 file changed, 47 insertions(+), 48 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 764433588fdd..2741e98d27fd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -253,31 +253,31 @@ config ARM64_CONT_PMD_SHIFT default 4 config ARCH_MMAP_RND_BITS_MIN - default 14 if ARM64_64K_PAGES - default 16 if ARM64_16K_PAGES - default 18 + default 14 if ARM64_64K_PAGES + default 16 if ARM64_16K_PAGES + default 18 # max bits determined by the following formula: # VA_BITS - PAGE_SHIFT - 3 config ARCH_MMAP_RND_BITS_MAX - default 19 if ARM64_VA_BITS=36 - default 24 if ARM64_VA_BITS=39 - default 27 if ARM64_VA_BITS=42 - default 30 if ARM64_VA_BITS=47 - default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES - default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES - default 33 if ARM64_VA_BITS=48 - default 14 if ARM64_64K_PAGES - default 16 if ARM64_16K_PAGES - default 18 + default 19 if ARM64_VA_BITS=36 + default 24 if ARM64_VA_BITS=39 + default 27 if ARM64_VA_BITS=42 + default 30 if ARM64_VA_BITS=47 + default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES + default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES + default 33 if ARM64_VA_BITS=48 + default 14 if ARM64_64K_PAGES + default 16 if ARM64_16K_PAGES + default 18 config ARCH_MMAP_RND_COMPAT_BITS_MIN - default 7 if ARM64_64K_PAGES - default 9 if ARM64_16K_PAGES - default 11 + default 7 if ARM64_64K_PAGES + default 9 if ARM64_16K_PAGES + default 11 config ARCH_MMAP_RND_COMPAT_BITS_MAX - default 16 + default 16 config NO_IOPORT_MAP def_bool y if !PCI @@ -304,7 +304,7 @@ config GENERIC_HWEIGHT def_bool y config GENERIC_CSUM - def_bool y + def_bool y config GENERIC_CALIBRATE_DELAY def_bool y @@ -1037,8 +1037,7 @@ config SOCIONEXT_SYNQUACER_PREITS If unsure, say Y. -endmenu - +endmenu # "ARM errata workarounds via the alternatives framework" choice prompt "Page size" @@ -1566,9 +1565,9 @@ config SETEND_EMULATION be unexpected results in the applications. If unsure, say Y -endif +endif # ARMV8_DEPRECATED -endif +endif # COMPAT menu "ARMv8.1 architectural features" @@ -1593,15 +1592,15 @@ config ARM64_PAN bool "Enable support for Privileged Access Never (PAN)" default y help - Privileged Access Never (PAN; part of the ARMv8.1 Extensions) - prevents the kernel or hypervisor from accessing user-space (EL0) - memory directly. + Privileged Access Never (PAN; part of the ARMv8.1 Extensions) + prevents the kernel or hypervisor from accessing user-space (EL0) + memory directly. - Choosing this option will cause any unprotected (not using - copy_to_user et al) memory access to fail with a permission fault. + Choosing this option will cause any unprotected (not using + copy_to_user et al) memory access to fail with a permission fault. - The feature is detected at runtime, and will remain as a 'nop' - instruction if the cpu does not implement the feature. + The feature is detected at runtime, and will remain as a 'nop' + instruction if the cpu does not implement the feature. config AS_HAS_LDAPR def_bool $(as-instr,.arch_extension rcpc) @@ -1629,15 +1628,15 @@ config ARM64_USE_LSE_ATOMICS built with binutils >= 2.25 in order for the new instructions to be used. 
-endmenu +endmenu # "ARMv8.1 architectural features" menu "ARMv8.2 architectural features" config AS_HAS_ARMV8_2 - def_bool $(cc-option,-Wa$(comma)-march=armv8.2-a) + def_bool $(cc-option,-Wa$(comma)-march=armv8.2-a) config AS_HAS_SHA3 - def_bool $(as-instr,.arch armv8.2-a+sha3) + def_bool $(as-instr,.arch armv8.2-a+sha3) config ARM64_PMEM bool "Enable support for persistent memory" @@ -1681,7 +1680,7 @@ config ARM64_CNP at runtime, and does not affect PEs that do not implement this feature. -endmenu +endmenu # "ARMv8.2 architectural features" menu "ARMv8.3 architectural features" @@ -1744,7 +1743,7 @@ config AS_HAS_PAC config AS_HAS_CFI_NEGATE_RA_STATE def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n) -endmenu +endmenu # "ARMv8.3 architectural features" menu "ARMv8.4 architectural features" @@ -1785,7 +1784,7 @@ config ARM64_TLB_RANGE The feature introduces new assembly instructions, and they were support when binutils >= 2.30. -endmenu +endmenu # "ARMv8.4 architectural features" menu "ARMv8.5 architectural features" @@ -1892,7 +1891,7 @@ config ARM64_MTE Documentation/arm64/memory-tagging-extension.rst. -endmenu +endmenu # "ARMv8.5 architectural features" menu "ARMv8.7 architectural features" @@ -1901,12 +1900,12 @@ config ARM64_EPAN default y depends on ARM64_PAN help - Enhanced Privileged Access Never (EPAN) allows Privileged - Access Never to be used with Execute-only mappings. + Enhanced Privileged Access Never (EPAN) allows Privileged + Access Never to be used with Execute-only mappings. - The feature is detected at runtime, and will remain disabled - if the cpu does not implement the feature. -endmenu + The feature is detected at runtime, and will remain disabled + if the cpu does not implement the feature. +endmenu # "ARMv8.7 architectural features" config ARM64_SVE bool "ARM Scalable Vector Extension support" @@ -1982,7 +1981,7 @@ config ARM64_DEBUG_PRIORITY_MASKING the validity of ICC_PMR_EL1 when calling concerned functions. If unsure, say N -endif +endif # ARM64_PSEUDO_NMI config RELOCATABLE bool "Build a relocatable kernel image" if EXPERT @@ -2053,7 +2052,7 @@ config ARCH_NR_GPIO If unsure, leave the default value. -endmenu +endmenu # "Kernel Features" menu "Boot options" @@ -2117,7 +2116,7 @@ config EFI help This option provides support for runtime services provided by UEFI firmware (such as non-volatile variables, realtime - clock, and platform reset). A UEFI stub is also provided to + clock, and platform reset). A UEFI stub is also provided to allow the kernel to be booted as an EFI application. This is only useful on systems that have UEFI firmware. @@ -2132,7 +2131,7 @@ config DMI However, even with this option, the resultant kernel should continue to boot on existing non-UEFI platforms. 
-endmenu +endmenu # "Boot options" config SYSVIPC_COMPAT def_bool y @@ -2153,7 +2152,7 @@ config ARCH_HIBERNATION_HEADER config ARCH_SUSPEND_POSSIBLE def_bool y -endmenu +endmenu # "Power management options" menu "CPU Power Management" @@ -2161,7 +2160,7 @@ source "drivers/cpuidle/Kconfig" source "drivers/cpufreq/Kconfig" -endmenu +endmenu # "CPU Power Management" source "drivers/acpi/Kconfig" @@ -2169,4 +2168,4 @@ source "arch/arm64/kvm/Kconfig" if CRYPTO source "arch/arm64/crypto/Kconfig" -endif +endif # CRYPTO From aea3cb356c9643b0936cb7c898e23edcd7c8f6c9 Mon Sep 17 00:00:00 2001 From: Juerg Haefliger Date: Tue, 17 May 2022 16:16:48 +0200 Subject: [PATCH 138/145] arm64: Kconfig.platforms: Add comments Add trailing comments to endmenu statements for better readability. Signed-off-by: Juerg Haefliger Link: https://lore.kernel.org/r/20220517141648.331976-3-juergh@canonical.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig.platforms | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 30b123cde02c..de9a18d3026f 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -325,4 +325,4 @@ config ARCH_ZYNQMP help This enables support for Xilinx ZynqMP Family -endmenu +endmenu # "Platform selection" From 8e1f78a92101e327740ea0dac903bff9ad37a59a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 17 May 2022 16:52:03 +0200 Subject: [PATCH 139/145] arm64/sve: Move sve_free() into SVE code section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If CONFIG_ARM64_SVE is not set: arch/arm64/kernel/fpsimd.c:294:13: warning: ‘sve_free’ defined but not used [-Wunused-function] Fix this by moving sve_free() and __sve_free() into the existing section protected by "#ifdef CONFIG_ARM64_SVE", now the last user outside that section has been removed. Fixes: a1259dd80719 ("arm64/sve: Delay freeing memory in fpsimd_flush_thread()") Signed-off-by: Geert Uytterhoeven Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/cd633284683c24cb9469f8ff429915aedf67f868.1652798894.git.geert+renesas@glider.be Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index a568735b7c2e..a6eee3fa3448 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -281,23 +281,6 @@ static bool have_cpu_fpsimd_context(void) return !preemptible() && __this_cpu_read(fpsimd_context_busy); } -/* - * Call __sve_free() directly only if you know task can't be scheduled - * or preempted. - */ -static void __sve_free(struct task_struct *task) -{ - kfree(task->thread.sve_state); - task->thread.sve_state = NULL; -} - -static void sve_free(struct task_struct *task) -{ - WARN_ON(test_tsk_thread_flag(task, TIF_SVE)); - - __sve_free(task); -} - unsigned int task_get_vl(const struct task_struct *task, enum vec_type type) { return task->thread.vl[type]; @@ -690,6 +673,22 @@ static void sve_to_fpsimd(struct task_struct *task) } #ifdef CONFIG_ARM64_SVE +/* + * Call __sve_free() directly only if you know task can't be scheduled + * or preempted. 
+ */ +static void __sve_free(struct task_struct *task) +{ + kfree(task->thread.sve_state); + task->thread.sve_state = NULL; +} + +static void sve_free(struct task_struct *task) +{ + WARN_ON(test_tsk_thread_flag(task, TIF_SVE)); + + __sve_free(task); +} /* * Return how many bytes of memory are required to store the full SVE From af65ea977bb8056b4e12b7057ababf19ab086f67 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 20 May 2022 17:16:33 +0100 Subject: [PATCH 140/145] arm64/sysreg: Generate definitions for CLIDR_EL1 Convert CLIDR_EL1 to be automatically generated with definition as per DDI0487H.a. No functional change. Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220520161639.324236-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/tools/sysreg | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 91e4f8601393..0cea8bdb792f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -457,7 +457,6 @@ #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) #define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) -#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) #define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index a236d7a821b4..0067d07f9125 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -212,6 +212,22 @@ Sysreg SMCR_EL1 3 0 1 2 6 Fields SMCR_ELx EndSysreg +Sysreg CLIDR_EL1 3 1 0 0 1 +Res0 63:47 +Field 46:33 Ttypen +Field 32:30 ICB +Field 29:27 LoUU +Field 26:24 LoC +Field 23:21 LoUIS +Field 20:18 Ctype7 +Field 17:15 Ctype6 +Field 14:12 Ctype5 +Field 11:9 Ctype4 +Field 8:6 Ctype3 +Field 5:3 Ctype2 +Field 2:0 Ctype1 +EndSysreg + Sysreg SMIDR_EL1 3 1 0 0 6 Res0 63:32 Field 31:24 IMPLEMENTER From 8c12e22c9f88142630bfadc3685b640aea94a9d5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 20 May 2022 17:16:34 +0100 Subject: [PATCH 141/145] arm64/sysreg: Generate definitions for CONTEXTIDR_ELx Convert the various CONTEXTIDR_ELx register definitions to be automatically generated following the definitions in DDI0487H.a. No functional change. 
Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220520161639.324236-3-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 -- arch/arm64/tools/sysreg | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 0cea8bdb792f..4fd64e6ec407 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -449,7 +449,6 @@ #define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) #define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) -#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1) #define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4) #define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7) @@ -629,7 +628,6 @@ #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) -#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) #define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 0067d07f9125..b2e01e108b8e 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -212,6 +212,15 @@ Sysreg SMCR_EL1 3 0 1 2 6 Fields SMCR_ELx EndSysreg +SysregFields CONTEXTIDR_ELx +Res0 63:32 +Field 31:0 PROCID +EndSysregFields + +Sysreg CONTEXTIDR_EL1 3 0 13 0 1 +Fields CONTEXTIDR_ELx +EndSysreg + Sysreg CLIDR_EL1 3 1 0 0 1 Res0 63:47 Field 46:33 Ttypen @@ -270,6 +279,10 @@ Sysreg SMCR_EL2 3 4 1 2 6 Fields SMCR_ELx EndSysreg +Sysreg CONTEXTIDR_EL2 3 4 13 0 1 +Fields CONTEXTIDR_ELx +EndSysreg + Sysreg ZCR_EL12 3 5 1 2 0 Fields ZCR_ELx EndSysreg @@ -278,6 +291,10 @@ Sysreg SMCR_EL12 3 5 1 2 6 Fields SMCR_ELx EndSysreg +Sysreg CONTEXTIDR_EL12 3 5 13 0 1 +Fields CONTEXTIDR_ELx +EndSysreg + SysregFields TTBRx_EL1 Field 63:48 ASID Field 47:1 BADDR From b5c0f1051dc3ba4a4c53fccb6604ecc56b0a2982 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 20 May 2022 17:16:35 +0100 Subject: [PATCH 142/145] arm64/sysreg: Generate definitions for CPACR_ELx Convert the CPACR system register definitions to be automatically generated using the definitions in DDI0487H.a. The kernel does have some additional definitions for subfields of SMEN, FPEN and ZEN which are not identified as distinct subfields in the architecture so the definitions are not updated as part of this patch. No functional change. 
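For reference, the kernel-only subfield defines mentioned above are the ones already present in sysreg.h, for example:

#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */
#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */

i.e. the two individual bits of the 2-bit FPEN field, which the architecture describes only as a single field.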
Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220520161639.324236-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 -- arch/arm64/tools/sysreg | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 4fd64e6ec407..7603c3344697 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -209,7 +209,6 @@ #define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) -#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2) #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) @@ -614,7 +613,6 @@ /* VHE encodings for architectural EL0/1 system registers */ #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) -#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index b2e01e108b8e..4bf413770b65 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -185,6 +185,22 @@ Field 1 A Field 0 M EndSysreg +SysregFields CPACR_ELx +Res0 63:29 +Field 28 TTA +Res0 27:26 +Field 25:24 SMEN +Res0 23:22 +Field 21:20 FPEN +Res0 19:18 +Field 17:16 ZEN +Res0 15:0 +EndSysregFields + +Sysreg CPACR_EL1 3 0 1 0 2 +Fields CPACR_ELx +EndSysreg + Sysreg SMPRI_EL1 3 0 1 2 4 Res0 63:4 Field 3:0 PRIORITY @@ -283,6 +299,10 @@ Sysreg CONTEXTIDR_EL2 3 4 13 0 1 Fields CONTEXTIDR_ELx EndSysreg +Sysreg CPACR_EL12 3 5 1 0 2 +Fields CPACR_ELx +EndSysreg + Sysreg ZCR_EL12 3 5 1 2 0 Fields ZCR_ELx EndSysreg From 8bd354b30533632396627291c4a3792f9c2947b2 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 20 May 2022 17:16:36 +0100 Subject: [PATCH 143/145] arm64/sysreg: Generate definitions for CSSELR_EL1 Convert CSSELR_EL1 to automatic generation as per DDI0487H.a, no functional change. Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220520161639.324236-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 -- arch/arm64/tools/sysreg | 7 +++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 7603c3344697..d1ca7f11e110 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -462,8 +462,6 @@ #define SMIDR_EL1_SMPS_SHIFT 15 #define SMIDR_EL1_AFFINITY_SHIFT 0 -#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0) - #define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1) #define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 4bf413770b65..759075747dcc 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -262,6 +262,13 @@ Res0 14:12 Field 11:0 AFFINITY EndSysreg +Sysreg CSSELR_EL1 3 2 0 0 0 +Res0 63:5 +Field 4 TnD +Field 3:1 Level +Field 0 InD +EndSysreg + Sysreg SVCR 3 3 4 2 2 Res0 63:2 Field 1 ZA From 01baa57ad6865bf60d5fcd77b31b2bd8bb155176 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 20 May 2022 17:16:37 +0100 Subject: [PATCH 144/145] arm64/sysreg: Generate definitions for DACR32_EL2 Convert DACR32_EL2 to automatic register generation as per DDI0487H.a, no functional change. 
Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220520161639.324236-6-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/tools/sysreg | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index d1ca7f11e110..c9d2d2a3dd68 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -554,7 +554,6 @@ #define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2) -#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) #define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 759075747dcc..af21acbb542d 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -302,6 +302,26 @@ Sysreg SMCR_EL2 3 4 1 2 6 Fields SMCR_ELx EndSysreg +Sysreg DACR32_EL2 3 4 3 0 0 +Res0 63:32 +Field 31:30 D15 +Field 29:28 D14 +Field 27:26 D13 +Field 25:24 D12 +Field 23:22 D11 +Field 21:20 D10 +Field 19:18 D9 +Field 17:16 D8 +Field 15:14 D7 +Field 13:12 D6 +Field 11:10 D5 +Field 9:8 D4 +Field 7:6 D3 +Field 5:4 D2 +Field 3:2 D1 +Field 1:0 D0 +EndSysreg + Sysreg CONTEXTIDR_EL2 3 4 13 0 1 Fields CONTEXTIDR_ELx EndSysreg From dffdeade18432d257e0c1845dc4e694f414a9721 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 20 May 2022 17:16:38 +0100 Subject: [PATCH 145/145] arm64/sysreg: Generate definitions for FAR_ELx Convert FAR_ELx to automatic register generation as per DDI0487H.a. In the architecture these registers have a single field "named" as "Faulting Virtual Address for synchronous exceptions taken to ELx" occupying the entire register, in order to fit in with the requirement to describe the contents of the register I have created a single field named ADDR. No functional change. 
Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220520161639.324236-7-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 3 --- arch/arm64/tools/sysreg | 12 ++++++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c9d2d2a3dd68..55f998c3dc28 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -249,7 +249,6 @@ #define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0) #define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1) -#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) #define SYS_PAR_EL1_F BIT(0) @@ -564,7 +563,6 @@ #define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3) #define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0) #define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0) -#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0) #define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1) #define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) @@ -619,7 +617,6 @@ #define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) #define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) -#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index af21acbb542d..ff5e552f7420 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -228,6 +228,10 @@ Sysreg SMCR_EL1 3 0 1 2 6 Fields SMCR_ELx EndSysreg +Sysreg FAR_EL1 3 0 6 0 0 +Field 63:0 ADDR +EndSysreg + SysregFields CONTEXTIDR_ELx Res0 63:32 Field 31:0 PROCID @@ -322,6 +326,10 @@ Field 3:2 D1 Field 1:0 D0 EndSysreg +Sysreg FAR_EL2 3 4 6 0 0 +Field 63:0 ADDR +EndSysreg + Sysreg CONTEXTIDR_EL2 3 4 13 0 1 Fields CONTEXTIDR_ELx EndSysreg @@ -338,6 +346,10 @@ Sysreg SMCR_EL12 3 5 1 2 6 Fields SMCR_ELx EndSysreg +Sysreg FAR_EL12 3 5 6 0 0 +Field 63:0 ADDR +EndSysreg + Sysreg CONTEXTIDR_EL12 3 5 13 0 1 Fields CONTEXTIDR_ELx EndSysreg