02c587733c
CONFIG_KASAN_STACK and CONFIG_KASAN_STACK_ENABLE both enable KASAN stack
instrumentation, but only one config option is needed, so remove
CONFIG_KASAN_STACK_ENABLE and make CONFIG_KASAN_STACK usable on its own;
see [1].

With stack instrumentation enabled, the option gets no prompt and defaults
to y for gcc, while for clang it is prompted for and defaults to n.

This patch fixes the following compilation warning:

include/linux/kasan.h:333:30: warning: 'CONFIG_KASAN_STACK' is not defined, evaluates to 0 [-Wundef]
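For context, the gcc/clang behaviour described above maps onto a Kconfig
entry roughly like the one below (a sketch, not the exact hunk of this
patch; the prompt text is illustrative):

	config KASAN_STACK
		bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG
		default y if CC_IS_GCC

With this shape, gcc builds get the option enabled silently, while clang
builds are asked and default to n unless the user opts in.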
[akpm@linux-foundation.org: fix merge snafu]
Link: https://bugzilla.kernel.org/show_bug.cgi?id=210221 [1]
Link: https://lkml.kernel.org/r/20210226012531.29231-1-walter-zh.wu@mediatek.com
Fixes: d9b571c885 ("kasan: fix KASAN_STACK dependency for HW_TAGS")
Signed-off-by: Walter Wu <walter-zh.wu@mediatek.com>
Suggested-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/smp.h>

	.text
/*
 * Implementation of MPIDR_EL1 hash algorithm through shifting
 * and OR'ing.
 *
 * @dst: register containing hash result
 * @rs0: register containing affinity level 0 bit shift
 * @rs1: register containing affinity level 1 bit shift
 * @rs2: register containing affinity level 2 bit shift
 * @rs3: register containing affinity level 3 bit shift
 * @mpidr: register containing MPIDR_EL1 value
 * @mask: register containing MPIDR mask
 *
 * Pseudo C-code:
 *
 *	u32 dst;
 *
 *	compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
 *		u32 aff0, aff1, aff2, aff3;
 *		u64 mpidr_masked = mpidr & mask;
 *		aff0 = mpidr_masked & 0xff;
 *		aff1 = mpidr_masked & 0xff00;
 *		aff2 = mpidr_masked & 0xff0000;
 *		aff3 = mpidr_masked & 0xff00000000;
 *		dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
 *	}
 *
 * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
 * Output register: dst
 * Note: input and output registers must be disjoint register sets
 *       (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
 */
	.macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
	and	\mpidr, \mpidr, \mask		// mask out MPIDR bits
	and	\dst, \mpidr, #0xff		// mask=aff0
	lsr	\dst, \dst, \rs0		// dst=aff0>>rs0
	and	\mask, \mpidr, #0xff00		// mask = aff1
	lsr	\mask, \mask, \rs1
	orr	\dst, \dst, \mask		// dst|=(aff1>>rs1)
	and	\mask, \mpidr, #0xff0000	// mask = aff2
	lsr	\mask, \mask, \rs2
	orr	\dst, \dst, \mask		// dst|=(aff2>>rs2)
	and	\mask, \mpidr, #0xff00000000	// mask = aff3
	lsr	\mask, \mask, \rs3
	orr	\dst, \dst, \mask		// dst|=(aff3>>rs3)
	.endm
/*
 * Save CPU state in the provided sleep_stack_data area, and publish its
 * location for cpu_resume()'s use in sleep_save_stash.
 *
 * cpu_resume() will restore this saved state, and return. Because the
 * link-register is saved and restored, it will appear to return from this
 * function. So that the caller can tell the suspend/resume paths apart,
 * __cpu_suspend_enter() will always return a non-zero value, whereas the
 * path through cpu_resume() will return 0.
 *
 * x0 = struct sleep_stack_data area
 */
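/*
 * For illustration, a simplified sketch of the caller side (based on
 * cpu_suspend() in arch/arm64/kernel/suspend.c; error handling and
 * bookkeeping are omitted):
 *
 *	struct sleep_stack_data state;
 *
 *	if (__cpu_suspend_enter(&state))
 *		ret = fn(arg);	// suspend path: run the suspend finisher
 *	else
 *		...		// resume path: we got here via cpu_resume()
 */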
SYM_FUNC_START(__cpu_suspend_enter)
	stp	x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
	stp	x19, x20, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+16]
	stp	x21, x22, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+32]
	stp	x23, x24, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+48]
	stp	x25, x26, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+64]
	stp	x27, x28, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+80]

	/* save the sp in cpu_suspend_ctx */
	mov	x2, sp
	str	x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]

	/* find the mpidr_hash */
	ldr_l	x1, sleep_save_stash
	mrs	x7, mpidr_el1
	adr_l	x9, mpidr_hash
	ldr	x10, [x9, #MPIDR_HASH_MASK]
	/*
	 * Following code relies on the struct mpidr_hash
	 * members size.
	 */
	ldp	w3, w4, [x9, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
	add	x1, x1, x8, lsl #3

	str	x0, [x1]
	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
	stp	x29, lr, [sp, #-16]!
	bl	cpu_do_suspend
	ldp	x29, lr, [sp], #16
	mov	x0, #1
	ret
SYM_FUNC_END(__cpu_suspend_enter)

	.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
	bl	init_kernel_el
	bl	switch_to_vhe
	bl	__cpu_setup
	/* enable the MMU early - so we can access sleep_save_stash by va */
	adrp	x1, swapper_pg_dir
	bl	__enable_mmu
	ldr	x8, =_cpu_resume
	br	x8
SYM_CODE_END(cpu_resume)
	.ltorg
	.popsection

SYM_FUNC_START(_cpu_resume)
	mrs	x1, mpidr_el1
	adr_l	x8, mpidr_hash		// x8 = struct mpidr_hash virt address

	/* retrieve mpidr_hash members to compute the hash */
	ldr	x2, [x8, #MPIDR_HASH_MASK]
	ldp	w3, w4, [x8, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2

	/* x7 contains hash index, let's use it to grab context pointer */
	ldr_l	x0, sleep_save_stash
	ldr	x0, [x0, x7, lsl #3]
	add	x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
	/* load sp from context */
	ldr	x2, [x0, #CPU_CTX_SP]
	mov	sp, x2
	/*
	 * cpu_do_resume expects x0 to contain context address pointer
	 */
	bl	cpu_do_resume

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
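	/*
	 * sp has just been restored from the pre-suspend context. The KASAN
	 * shadow for the stack below it may still hold poison left by the
	 * suspend finisher and deeper calls, whose epilogues never ran on
	 * this path, so clear it before running instrumented C code.
	 */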
	mov	x0, sp
	bl	kasan_unpoison_task_stack_below
#endif

	ldp	x19, x20, [x29, #16]
	ldp	x21, x22, [x29, #32]
	ldp	x23, x24, [x29, #48]
	ldp	x25, x26, [x29, #64]
	ldp	x27, x28, [x29, #80]
	ldp	x29, lr, [x29]
	mov	x0, #0
	ret
SYM_FUNC_END(_cpu_resume)