riscv: Use SYM_*() assembly macros instead of deprecated ones
ENTRY()/END()/WEAK() macros are deprecated and we should make use of the new SYM_*() macros [1] for better annotation of symbols. Replace the deprecated ones with the new ones and fix wrong usage of END()/ENDPROC() to correctly describe the symbols.

[1] https://docs.kernel.org/core-api/asm-annotations.html

Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20231024132655.730417-3-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>

commit 76329c6939 (parent b18f7296fb)

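As a rough guide to reading the hunks below, the following sketch shows how the deprecated annotations map onto their SYM_*() replacements. It is illustrative only: my_func, my_weak_impl and my_trampoline are made-up symbol names, not part of this patch, and the mapping is summarized from the asm-annotations document linked above.

#include <linux/linkage.h>

/* Old style (deprecated)                    New style (used by this patch)       */
/*   ENTRY(f) ... ENDPROC(f)              -> SYM_FUNC_START(f) ... SYM_FUNC_END(f) */
/*   ENTRY(c) ... END(c) (non-C-ABI code) -> SYM_CODE_START(c) ... SYM_CODE_END(c) */
/*   WEAK(alias) as a second entry point  -> SYM_FUNC_ALIAS_WEAK(alias, impl)      */

/* A C-callable function gets STT_FUNC type and a proper symbol size. */
SYM_FUNC_START(my_func)
	ret
SYM_FUNC_END(my_func)

/* The weak alias is declared separately instead of a WEAK() label in the body. */
SYM_FUNC_ALIAS_WEAK(my_weak_impl, my_func)

/* Non-C-ABI code (boot code, trampolines); _LOCAL keeps the symbol file-local. */
SYM_CODE_START_LOCAL(my_trampoline)
	ret
SYM_CODE_END(my_trampoline)
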
@@ -9,7 +9,7 @@
 /* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
 /* Performs a memcpy without aligning buffers, using word loads and stores. */
 /* Note: The size is truncated to a multiple of 8 * SZREG */
-ENTRY(__riscv_copy_words_unaligned)
+SYM_FUNC_START(__riscv_copy_words_unaligned)
 andi a4, a2, ~((8*SZREG)-1)
 beqz a4, 2f
 add a3, a1, a4
@@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)

 2:
 ret
-END(__riscv_copy_words_unaligned)
+SYM_FUNC_END(__riscv_copy_words_unaligned)

 /* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
 /* Performs a memcpy without aligning buffers, using only byte accesses. */
 /* Note: The size is truncated to a multiple of 8 */
-ENTRY(__riscv_copy_bytes_unaligned)
+SYM_FUNC_START(__riscv_copy_bytes_unaligned)
 andi a4, a2, ~(8-1)
 beqz a4, 2f
 add a3, a1, a4
@@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)

 2:
 ret
-END(__riscv_copy_bytes_unaligned)
+SYM_FUNC_END(__riscv_copy_bytes_unaligned)

@@ -19,7 +19,7 @@
 #include <asm/csr.h>
 #include <asm/asm-offsets.h>

-ENTRY(__fstate_save)
+SYM_FUNC_START(__fstate_save)
 li a2, TASK_THREAD_F0
 add a0, a0, a2
 li t1, SR_FS
@@ -60,9 +60,9 @@ ENTRY(__fstate_save)
 sw t0, TASK_THREAD_FCSR_F0(a0)
 csrc CSR_STATUS, t1
 ret
-ENDPROC(__fstate_save)
+SYM_FUNC_END(__fstate_save)

-ENTRY(__fstate_restore)
+SYM_FUNC_START(__fstate_restore)
 li a2, TASK_THREAD_F0
 add a0, a0, a2
 li t1, SR_FS
@@ -103,7 +103,7 @@ ENTRY(__fstate_restore)
 fscsr t0
 csrc CSR_STATUS, t1
 ret
-ENDPROC(__fstate_restore)
+SYM_FUNC_END(__fstate_restore)

 #define get_f32(which) fmv.x.s a0, which; j 2f
 #define put_f32(which) fmv.s.x which, a1; j 2f

@@ -19,7 +19,7 @@
 #include "efi-header.S"

 __HEAD
-ENTRY(_start)
+SYM_CODE_START(_start)
 /*
  * Image header expected by Linux boot-loaders. The image header data
  * structure is described in asm/image.h.
@@ -187,9 +187,9 @@ secondary_start_sbi:
 wfi
 j .Lsecondary_park

-END(_start)
+SYM_CODE_END(_start)

-ENTRY(_start_kernel)
+SYM_CODE_START(_start_kernel)
 /* Mask all interrupts */
 csrw CSR_IE, zero
 csrw CSR_IP, zero
@@ -348,10 +348,10 @@ ENTRY(_start_kernel)
 tail .Lsecondary_start_common
 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */

-END(_start_kernel)
+SYM_CODE_END(_start_kernel)

 #ifdef CONFIG_RISCV_M_MODE
-ENTRY(reset_regs)
+SYM_CODE_START_LOCAL(reset_regs)
 li sp, 0
 li gp, 0
 li tp, 0
@@ -449,5 +449,5 @@ ENTRY(reset_regs)
 .Lreset_regs_done_vector:
 #endif /* CONFIG_RISCV_ISA_V */
 ret
-END(reset_regs)
+SYM_CODE_END(reset_regs)
 #endif /* CONFIG_RISCV_M_MODE */

@@ -21,7 +21,7 @@
  *
  * Always returns 0
  */
-ENTRY(__hibernate_cpu_resume)
+SYM_FUNC_START(__hibernate_cpu_resume)
 /* switch to hibernated image's page table. */
 csrw CSR_SATP, s0
 sfence.vma
@@ -34,7 +34,7 @@ ENTRY(__hibernate_cpu_resume)
 mv a0, zero

 ret
-END(__hibernate_cpu_resume)
+SYM_FUNC_END(__hibernate_cpu_resume)

 /*
  * Prepare to restore the image.
@@ -42,7 +42,7 @@ END(__hibernate_cpu_resume)
  * a1: satp of temporary page tables.
  * a2: cpu_resume.
  */
-ENTRY(hibernate_restore_image)
+SYM_FUNC_START(hibernate_restore_image)
 mv s0, a0
 mv s1, a1
 mv s2, a2
@@ -50,7 +50,7 @@ ENTRY(hibernate_restore_image)
 REG_L a1, relocated_restore_code

 jr a1
-END(hibernate_restore_image)
+SYM_FUNC_END(hibernate_restore_image)

 /*
  * The below code will be executed from a 'safe' page.
@@ -58,7 +58,7 @@ END(hibernate_restore_image)
  * back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
  * to restore the CPU context.
  */
-ENTRY(hibernate_core_restore_code)
+SYM_FUNC_START(hibernate_core_restore_code)
 /* switch to temp page table. */
 csrw satp, s1
 sfence.vma
@@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code)
 bnez s4, .Lcopy

 jr s2
-END(hibernate_core_restore_code)
+SYM_FUNC_END(hibernate_core_restore_code)

@@ -82,7 +82,7 @@
 .endm
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
 SAVE_ABI

 addi a0, t0, -FENTRY_RA_OFFSET
@@ -91,8 +91,7 @@ ENTRY(ftrace_caller)
 mv a1, ra
 mv a3, sp

-ftrace_call:
-.global ftrace_call
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 call ftrace_stub

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,16 +101,15 @@ ftrace_call:
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 mv a2, s0
 #endif
-ftrace_graph_call:
-.global ftrace_graph_call
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
 call ftrace_stub
 #endif
 RESTORE_ABI
 jr t0
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)

 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-ENTRY(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_regs_caller)
 SAVE_ALL

 addi a0, t0, -FENTRY_RA_OFFSET
@@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller)
 mv a1, ra
 mv a3, sp

-ftrace_regs_call:
-.global ftrace_regs_call
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
 call ftrace_stub

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,12 +128,11 @@ ftrace_regs_call:
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 mv a2, s0
 #endif
-ftrace_graph_regs_call:
-.global ftrace_graph_regs_call
+SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
 call ftrace_stub
 #endif

 RESTORE_ALL
 jr t0
-ENDPROC(ftrace_regs_caller)
+SYM_FUNC_END(ftrace_regs_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

@@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph)
 ret
 SYM_FUNC_END(ftrace_stub_graph)

-ENTRY(return_to_handler)
+SYM_FUNC_START(return_to_handler)
 /*
  * On implementing the frame point test, the ideal way is to compare the
  * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
@@ -76,11 +76,11 @@ ENTRY(return_to_handler)
 mv a2, a0
 RESTORE_RET_ABI_STATE
 jalr a2
-ENDPROC(return_to_handler)
+SYM_FUNC_END(return_to_handler)
 #endif

 #ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(MCOUNT_NAME)
+SYM_FUNC_START(MCOUNT_NAME)
 la t4, ftrace_stub
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 la t0, ftrace_graph_return
@@ -126,6 +126,6 @@ ENTRY(MCOUNT_NAME)
 jalr t5
 RESTORE_ABI_STATE
 ret
-ENDPROC(MCOUNT_NAME)
+SYM_FUNC_END(MCOUNT_NAME)
 #endif
 EXPORT_SYMBOL(MCOUNT_NAME)

@@ -75,7 +75,7 @@
 REG_L x31, PT_T6(sp)
 .endm

-ENTRY(arch_rethook_trampoline)
+SYM_CODE_START(arch_rethook_trampoline)
 addi sp, sp, -(PT_SIZE_ON_STACK)
 save_all_base_regs

@@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline)
 addi sp, sp, PT_SIZE_ON_STACK

 ret
-ENDPROC(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)

@@ -16,7 +16,7 @@
 .altmacro
 .option norelax

-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
 /* Save registers (except A0 and T0-T6) */
 REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
 REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
@@ -57,7 +57,7 @@ ENTRY(__cpu_suspend_enter)

 /* Return to C code */
 ret
-END(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)

 SYM_TYPED_FUNC_START(__cpu_resume_enter)
 /* Load the global pointer */

@@ -8,7 +8,7 @@

 .text
 /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
-ENTRY(__vdso_flush_icache)
+SYM_FUNC_START(__vdso_flush_icache)
 .cfi_startproc
 #ifdef CONFIG_SMP
 li a7, __NR_riscv_flush_icache
@@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache)
 #endif
 ret
 .cfi_endproc
-ENDPROC(__vdso_flush_icache)
+SYM_FUNC_END(__vdso_flush_icache)

@@ -8,11 +8,11 @@

 .text
 /* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
-ENTRY(__vdso_getcpu)
+SYM_FUNC_START(__vdso_getcpu)
 .cfi_startproc
 /* For now, just do the syscall. */
 li a7, __NR_getcpu
 ecall
 ret
 .cfi_endproc
-ENDPROC(__vdso_getcpu)
+SYM_FUNC_END(__vdso_getcpu)

@@ -7,10 +7,10 @@
 #include <asm/unistd.h>

 .text
-ENTRY(__vdso_rt_sigreturn)
+SYM_FUNC_START(__vdso_rt_sigreturn)
 .cfi_startproc
 .cfi_signal_frame
 li a7, __NR_rt_sigreturn
 ecall
 .cfi_endproc
-ENDPROC(__vdso_rt_sigreturn)
+SYM_FUNC_END(__vdso_rt_sigreturn)

@@ -5,11 +5,11 @@
 #include <asm/unistd.h>

 .text
-ENTRY(riscv_hwprobe)
+SYM_FUNC_START(riscv_hwprobe)
 .cfi_startproc
 li a7, __NR_riscv_hwprobe
 ecall
 ret

 .cfi_endproc
-ENDPROC(riscv_hwprobe)
+SYM_FUNC_END(riscv_hwprobe)

@@ -7,8 +7,7 @@
 #include <asm/asm.h>

 /* void *memcpy(void *, const void *, size_t) */
-ENTRY(__memcpy)
-WEAK(memcpy)
+SYM_FUNC_START(__memcpy)
 move t6, a0 /* Preserve return value */

 /* Defer to byte-oriented copy for small sizes */
@@ -105,6 +104,7 @@ WEAK(memcpy)
 bltu a1, a3, 5b
 6:
 ret
-END(__memcpy)
+SYM_FUNC_END(__memcpy)
+SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
 SYM_FUNC_ALIAS(__pi_memcpy, __memcpy)
 SYM_FUNC_ALIAS(__pi___memcpy, __memcpy)

@@ -7,7 +7,6 @@
 #include <asm/asm.h>

 SYM_FUNC_START(__memmove)
-SYM_FUNC_START_WEAK(memmove)
 /*
  * Returns
  * a0 - dest
@@ -312,7 +311,7 @@ SYM_FUNC_START_WEAK(memmove)
 .Lreturn_from_memmove:
 ret

-SYM_FUNC_END(memmove)
 SYM_FUNC_END(__memmove)
+SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
 SYM_FUNC_ALIAS(__pi_memmove, __memmove)
 SYM_FUNC_ALIAS(__pi___memmove, __memmove)

@@ -8,8 +8,7 @@
 #include <asm/asm.h>

 /* void *memset(void *, int, size_t) */
-ENTRY(__memset)
-WEAK(memset)
+SYM_FUNC_START(__memset)
 move t0, a0 /* Preserve return value */

 /* Defer to byte-oriented fill for small sizes */
@@ -110,4 +109,5 @@ WEAK(memset)
 bltu t0, a3, 5b
 6:
 ret
-END(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_ALIAS_WEAK(memset, __memset)

@@ -10,8 +10,7 @@
 _asm_extable 100b, \lbl
 .endm

-ENTRY(__asm_copy_to_user)
-ENTRY(__asm_copy_from_user)
+SYM_FUNC_START(__asm_copy_to_user)

 /* Enable access to user memory */
 li t6, SR_SUM
@@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
 csrc CSR_STATUS, t6
 sub a0, t5, a0
 ret
-ENDPROC(__asm_copy_to_user)
-ENDPROC(__asm_copy_from_user)
+SYM_FUNC_END(__asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_to_user)
+SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_from_user)


-ENTRY(__clear_user)
+SYM_FUNC_START(__clear_user)

 /* Enable access to user memory */
 li t6, SR_SUM
@@ -233,5 +232,5 @@ ENTRY(__clear_user)
 csrc CSR_STATUS, t6
 sub a0, a3, a0
 ret
-ENDPROC(__clear_user)
+SYM_FUNC_END(__clear_user)
 EXPORT_SYMBOL(__clear_user)

@@ -7,15 +7,11 @@
  * Author: Li Zhengyu (lizhengyu3@huawei.com)
  *
  */

-.macro size, sym:req
-.size \sym, . - \sym
-.endm
+#include <linux/linkage.h>

 .text

-.globl purgatory_start
-purgatory_start:
+SYM_CODE_START(purgatory_start)

 lla sp, .Lstack
 mv s0, a0 /* The hartid of the current hart */
@@ -28,8 +24,7 @@ purgatory_start:
 mv a1, s1
 ld a2, riscv_kernel_entry
 jr a2

-size purgatory_start
+SYM_CODE_END(purgatory_start)

 .align 4
 .rept 256
@@ -39,9 +34,6 @@ size purgatory_start

 .data

-.globl riscv_kernel_entry
-riscv_kernel_entry:
-.quad 0
-size riscv_kernel_entry
+SYM_DATA(riscv_kernel_entry, .quad 0)

 .end