Xtensa updates for v6.5

- clean up platform_* interface of the xtensa architecture
- enable HAVE_ASM_MODVERSIONS
- drop ARCH_WANT_FRAME_POINTERS
- clean up unaligned access exception handler
- provide handler for load/store exceptions
- various small fixes and cleanups
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCgAxFiEEK2eFS5jlMn3N6xfYUfnMkfg/oEQFAmSavmgTHGpjbXZia2Jj
 QGdtYWlsLmNvbQAKCRBR+cyR+D+gRKGrD/43MoeSAq00Uwi4tOVCBIT9LhDuPc0+
 +2I2JzLx7hL2O6gxbikGjQIZRd9vS9HaeIq8ecbIZ2CsOpnbitg25HTLtA4gfPWk
 VGpZJPKZ1xeB7nDsIFWTwYPMcIwnQBYNOHHg/bE1DU4kWIy03NQ8u73D9n93pQbA
 n3+pgQf4nTwTyGQE03/yeS68OhdsAW1jqZ4V2W7w1DhJj1g9Xalbk0RTEWFW+ID2
 lFdcjgMkDBKG7VwFur/vzz50fNvJiZifsCPrGACtS9yKJXizZnSIwQF2V40JMw9S
 rXyd7JKa8cvKQaanpZ2HpTmE9YyjRIR9vrpJOrK7I/qm47Aism8L4zU0+GPxmLJt
 LtWrreE1QphES+IU2AzqSw2VQzjRa330Sg4C3WOq88IXJZeAIkLYDXL8Z+VsS5cH
 sgmKYKYXmslHOz93RfqWlNV5aBb8YwGcvCZuE5mAmfP7QGIexRDOma5gwRGp5DJR
 6CHg77B5f8+JiQZpcMdflOGtmj6ZEhLicxDbO+0pVYUdf7X2/hPJtbKczdhhgnlu
 zf9vSrMsjgp8GIWDy5ZSIg564zqieJZWibc35ebkYk3W5kWsMqXSTE5TnrLy/32A
 Iz7LukwA5ZikGR4eo9kYPnPHHnSvZu30+4U2jU2V0nLhfZ372XWixaAqHPLX3rfT
 gVzhnTU/Bc2vGw==
 =ws6a
 -----END PGP SIGNATURE-----

Merge tag 'xtensa-20230627' of https://github.com/jcmvbkbc/linux-xtensa

Pull xtensa updates from Max Filippov:

 - clean up platform_* interface of the xtensa architecture

 - enable HAVE_ASM_MODVERSIONS

 - drop ARCH_WANT_FRAME_POINTERS

 - clean up unaligned access exception handler

 - provide handler for load/store exceptions

 - various small fixes and cleanups

* tag 'xtensa-20230627' of https://github.com/jcmvbkbc/linux-xtensa:
  xtensa: dump userspace code around the exception PC
  xtensa: rearrange show_stack output
  xtensa: add load/store exception handler
  xtensa: rearrange unaligned exception handler
  xtensa: always install slow handler for unaligned access exception
  xtensa: move early_trap_init from kasan_early_init to init_arch
  xtensa: drop ARCH_WANT_FRAME_POINTERS
  xtensa: report trax and perf counters in cpuinfo
  xtensa: add asm-prototypes.h
  xtensa: only build __strncpy_user with CONFIG_ARCH_HAS_STRNCPY_FROM_USER
  xtensa: drop bcopy implementation
  xtensa: drop EXPORT_SYMBOL for common_exception_return
  xtensa: boot-redboot: clean up Makefile
  xtensa: clean up default platform functions
  xtensa: drop platform_halt and platform_power_off
  xtensa: drop platform_restart
  xtensa: drop platform_heartbeat
  xtensa: xt2000: drop empty platform_init
Linus Torvalds 2023-06-27 15:44:11 -07:00
commit 8d8026f376
41 changed files with 479 additions and 338 deletions

@ -16,7 +16,6 @@ config XTENSA
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
@ -35,6 +34,7 @@ config XTENSA
select HAVE_ARCH_KCSAN
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING_USER
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
@ -203,6 +203,18 @@ config XTENSA_UNALIGNED_USER
Say Y here to enable unaligned memory access in user space.
config XTENSA_LOAD_STORE
bool "Load/store exception handler for memory only readable with l32"
help
The Xtensa architecture only allows reading memory attached to its
instruction bus with the l32r and l32i instructions; all other
instructions raise an exception with the LoadStoreErrorCause code.
This makes some configurations hard to use, e.g. storing string
literals in FLASH memory attached to the instruction bus.
Say Y here to enable an exception handler that allows transparent
byte and 2-byte access to memory attached to the instruction bus.
config HAVE_SMP
bool "System Supports SMP (MX)"
depends on XTENSA_VARIANT_CUSTOM
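The help text above describes why the new option exists; here is a minimal sketch of the situation it handles (the section name, symbol names and initcall used below are illustrative assumptions, not taken from the patch). A constant that the linker places in flash reachable only over the instruction bus can be read with l32i/l32r, but byte-wise access such as strlen() is compiled to l8ui and traps with LoadStoreErrorCause unless CONFIG_XTENSA_LOAD_STORE emulates the access.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>

/* Hypothetical section mapped by the board's linker script to
 * flash that is wired to the instruction bus only. */
static const char banner[] __section(".iflash.rodata") = "xtensa";

static int __init loadstore_example_init(void)
{
	/* strlen() issues l8ui byte loads; without CONFIG_XTENSA_LOAD_STORE
	 * each of them raises a LoadStoreError exception.  The handler added
	 * by this option emulates the access with aligned l32i loads. */
	return strlen(banner) == 6 ? 0 : -ENODEV;
}
early_initcall(loadstore_example_init);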

@ -38,3 +38,11 @@ config PRINT_STACK_DEPTH
help
This option allows you to set the stack depth that the kernel
prints in stack traces.
config PRINT_USER_CODE_ON_UNHANDLED_EXCEPTION
bool "Dump user code around unhandled exception address"
help
Enable this option to display the user code around the PC of an
unhandled exception (starting at an address aligned on a 16-byte
boundary).
This may simplify finding the faulting code in the absence of other
debug facilities.

@ -6,16 +6,12 @@
OBJCOPY_ARGS := -O $(if $(CONFIG_CPU_BIG_ENDIAN),elf32-xtensa-be,elf32-xtensa-le)
LD_ARGS = -T $(srctree)/$(obj)/boot.ld
boot-y := bootstrap.o
targets += $(boot-y)
OBJS := $(addprefix $(obj)/,$(boot-y))
LIBS := arch/xtensa/boot/lib/lib.a arch/xtensa/lib/lib.a
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
$(obj)/zImage.o: $(obj)/../vmlinux.bin.gz $(OBJS)
$(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
--add-section image=$< \
@ -23,7 +19,10 @@ $(obj)/zImage.o: $(obj)/../vmlinux.bin.gz $(OBJS)
$(OBJS) $@
$(obj)/zImage.elf: $(obj)/zImage.o $(LIBS)
$(Q)$(LD) $(LD_ARGS) -o $@ $^ -L/xtensa-elf/lib $(LIBGCC)
$(Q)$(LD) $(KBUILD_LDFLAGS) \
-T $(srctree)/$(obj)/boot.ld \
--build-id=none \
-o $@ $^
$(obj)/../zImage.redboot: $(obj)/zImage.elf
$(Q)$(OBJCOPY) -S -O binary $< $@

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PROTOTYPES_H
#define __ASM_PROTOTYPES_H
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/ftrace.h>
#include <asm/page.h>
#include <asm/string.h>
#include <asm/uaccess.h>
#include <asm-generic/asm-prototypes.h>
/*
* gcc internal math functions
*/
long long __ashrdi3(long long, int);
long long __ashldi3(long long, int);
long long __bswapdi2(long long);
int __bswapsi2(int);
long long __lshrdi3(long long, int);
int __divsi3(int, int);
int __modsi3(int, int);
int __mulsi3(int, int);
unsigned int __udivsi3(unsigned int, unsigned int);
unsigned int __umodsi3(unsigned int, unsigned int);
unsigned long long __umulsidi3(unsigned int, unsigned int);
#endif /* __ASM_PROTOTYPES_H */

@ -11,6 +11,7 @@
#ifndef _XTENSA_ASMMACRO_H
#define _XTENSA_ASMMACRO_H
#include <asm-generic/export.h>
#include <asm/core.h>
/*

@ -26,6 +26,14 @@
#define XCHAL_SPANNING_WAY 0
#endif
#ifndef XCHAL_HAVE_TRAX
#define XCHAL_HAVE_TRAX 0
#endif
#ifndef XCHAL_NUM_PERF_COUNTERS
#define XCHAL_NUM_PERF_COUNTERS 0
#endif
#if XCHAL_HAVE_WINDOWED
#if defined(CONFIG_USER_ABI_DEFAULT) || defined(CONFIG_USER_ABI_CALL0_PROBE)
/* Whether windowed ABI is supported in userspace. */

@ -13,17 +13,8 @@
#include <asm/processor.h>
#ifndef __ASSEMBLY__
#define ftrace_return_address0 ({ unsigned long a0, a1; \
__asm__ __volatile__ ( \
"mov %0, a0\n" \
"mov %1, a1\n" \
: "=r"(a0), "=r"(a1)); \
MAKE_PC_FROM_RA(a0, a1); })
#ifdef CONFIG_FRAME_POINTER
extern unsigned long return_address(unsigned level);
#define ftrace_return_address(n) return_address(n)
#endif
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_FUNCTION_TRACER

@ -27,31 +27,11 @@ extern void platform_init(bp_tag_t*);
*/
extern void platform_setup (char **);
/*
* platform_restart is called to restart the system.
*/
extern void platform_restart (void);
/*
* platform_halt is called to stop the system and halt.
*/
extern void platform_halt (void);
/*
* platform_power_off is called to stop the system and power it off.
*/
extern void platform_power_off (void);
/*
* platform_idle is called from the idle function.
*/
extern void platform_idle (void);
/*
* platform_heartbeat is called every HZ
*/
extern void platform_heartbeat (void);
/*
* platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE)
*/

@ -118,9 +118,6 @@ extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
/* Don't build bcopy at all ... */
#define __HAVE_ARCH_BCOPY
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
/*

@ -47,6 +47,7 @@ __init trap_set_handler(int cause, xtensa_exception_handler *handler);
asmlinkage void fast_illegal_instruction_user(void);
asmlinkage void fast_syscall_user(void);
asmlinkage void fast_alloca(void);
asmlinkage void fast_load_store(void);
asmlinkage void fast_unaligned(void);
asmlinkage void fast_second_level_miss(void);
asmlinkage void fast_store_prohibited(void);
@ -64,8 +65,14 @@ void do_unhandled(struct pt_regs *regs);
static inline void __init early_trap_init(void)
{
static struct exc_table init_exc_table __initdata = {
#ifdef CONFIG_XTENSA_LOAD_STORE
.fast_kernel_handler[EXCCAUSE_LOAD_STORE_ERROR] =
fast_load_store,
#endif
#ifdef CONFIG_MMU
.fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
fast_second_level_miss,
#endif
};
xtensa_set_sr(&init_exc_table, excsave1);
}

@ -22,7 +22,17 @@
#include <asm/asmmacro.h>
#include <asm/processor.h>
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || defined CONFIG_XTENSA_LOAD_STORE
#define LOAD_EXCEPTION_HANDLER
#endif
#if XCHAL_UNALIGNED_STORE_EXCEPTION || defined LOAD_EXCEPTION_HANDLER
#define ANY_EXCEPTION_HANDLER
#endif
#if XCHAL_HAVE_WINDOWED
#define UNALIGNED_USER_EXCEPTION
#endif
/* First-level exception handler for unaligned exceptions.
*
@ -58,10 +68,6 @@
* BE shift left / mask 0 0 X X
*/
#if XCHAL_HAVE_WINDOWED
#define UNALIGNED_USER_EXCEPTION
#endif
#if XCHAL_HAVE_BE
#define HWORD_START 16
@ -103,7 +109,7 @@
*
* 23 0
* -----------------------------
* res 0000 0010
* L8UI xxxx xxxx 0000 ssss tttt 0010
* L16UI xxxx xxxx 0001 ssss tttt 0010
* L32I xxxx xxxx 0010 ssss tttt 0010
* XXX 0011 ssss tttt 0010
@ -128,9 +134,11 @@
#define OP0_L32I_N 0x8 /* load immediate narrow */
#define OP0_S32I_N 0x9 /* store immediate narrow */
#define OP0_LSAI 0x2 /* load/store */
#define OP1_SI_MASK 0x4 /* OP1 bit set for stores */
#define OP1_SI_BIT 2 /* OP1 bit number for stores */
#define OP1_L8UI 0x0
#define OP1_L32I 0x2
#define OP1_L16UI 0x1
#define OP1_L16SI 0x9
@ -155,60 +163,74 @@
*/
.literal_position
#ifdef CONFIG_XTENSA_LOAD_STORE
ENTRY(fast_load_store)
call0 .Lsave_and_load_instruction
/* Analyze the instruction (load or store?). */
extui a0, a4, INSN_OP0, 4 # get insn.op0 nibble
#if XCHAL_HAVE_DENSITY
_beqi a0, OP0_L32I_N, 1f # L32I.N, jump
#endif
bnei a0, OP0_LSAI, .Linvalid_instruction
/* 'store indicator bit' set, jump */
bbsi.l a4, OP1_SI_BIT + INSN_OP1, .Linvalid_instruction
1:
movi a3, ~3
and a3, a3, a8 # align memory address
__ssa8 a8
#ifdef CONFIG_MMU
/* l32e can't be used here even when it's available. */
/* TODO access_ok(a3) could be used here */
j .Linvalid_instruction
#endif
l32i a5, a3, 0
l32i a6, a3, 4
__src_b a3, a5, a6 # a3 has the data word
#if XCHAL_HAVE_DENSITY
addi a7, a7, 2 # increment PC (assume 16-bit insn)
_beqi a0, OP0_L32I_N, .Lload_w# l32i.n: jump
addi a7, a7, 1
#else
addi a7, a7, 3
#endif
extui a5, a4, INSN_OP1, 4
_beqi a5, OP1_L32I, .Lload_w
bnei a5, OP1_L8UI, .Lload16
extui a3, a3, 0, 8
j .Lload_w
ENDPROC(fast_load_store)
#endif
/*
* Entry condition:
*
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
* a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
* excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
#ifdef ANY_EXCEPTION_HANDLER
ENTRY(fast_unaligned)
/* Note: We don't expect the address to be aligned on a word
* boundary. After all, the processor generated that exception
* and it would be a hardware fault.
*/
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
/* Save some working register */
s32i a4, a2, PT_AREG4
s32i a5, a2, PT_AREG5
s32i a6, a2, PT_AREG6
s32i a7, a2, PT_AREG7
s32i a8, a2, PT_AREG8
rsr a0, depc
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
rsr a3, excsave1
movi a4, fast_unaligned_fixup
s32i a4, a3, EXC_TABLE_FIXUP
/* Keep value of SAR in a0 */
rsr a0, sar
rsr a8, excvaddr # load unaligned memory address
/* Now, identify one of the following load/store instructions.
*
* The only possible danger of a double exception on the
* following l32i instructions is kernel code in vmalloc
* memory. The processor was just executing at the EPC_1
* address, and indeed, already fetched the instruction. That
* guarantees a TLB mapping, which hasn't been replaced by
* this unaligned exception handler that uses only static TLB
* mappings. However, high-level interrupt handlers might
* modify TLB entries, so for the generic case, we register a
* TABLE_FIXUP handler here, too.
*/
/* a3...a6 saved on stack, a2 = SP */
/* Extract the instruction that caused the unaligned access. */
rsr a7, epc1 # load exception address
movi a3, ~3
and a3, a3, a7 # mask lower bits
l32i a4, a3, 0 # load 2 words
l32i a5, a3, 4
__ssa8 a7
__src_b a4, a4, a5 # a4 has the instruction
call0 .Lsave_and_load_instruction
/* Analyze the instruction (load or store?). */
@ -222,12 +244,17 @@ ENTRY(fast_unaligned)
/* 'store indicator bit' not set, jump */
_bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
#endif
#if XCHAL_UNALIGNED_STORE_EXCEPTION
/* Store: Jump to table entry to get the value in the source register.*/
.Lstore:movi a5, .Lstore_table # table
extui a6, a4, INSN_T, 4 # get source register
addx8 a5, a6, a5
jx a5 # jump into table
#endif
#if XCHAL_UNALIGNED_LOAD_EXCEPTION
/* Load: Load memory address. */
@ -249,7 +276,7 @@ ENTRY(fast_unaligned)
addi a7, a7, 2 # increment PC (assume 16-bit insn)
extui a5, a4, INSN_OP0, 4
_beqi a5, OP0_L32I_N, 1f # l32i.n: jump
_beqi a5, OP0_L32I_N, .Lload_w# l32i.n: jump
addi a7, a7, 1
#else
@ -257,21 +284,26 @@ ENTRY(fast_unaligned)
#endif
extui a5, a4, INSN_OP1, 4
_beqi a5, OP1_L32I, 1f # l32i: jump
_beqi a5, OP1_L32I, .Lload_w # l32i: jump
#endif
#ifdef LOAD_EXCEPTION_HANDLER
.Lload16:
extui a3, a3, 0, 16 # extract lower 16 bits
_beqi a5, OP1_L16UI, 1f
_beqi a5, OP1_L16UI, .Lload_w
addi a5, a5, -OP1_L16SI
_bnez a5, .Linvalid_instruction_load
_bnez a5, .Linvalid_instruction
/* sign extend value */
#if XCHAL_HAVE_SEXT
sext a3, a3, 15
#else
slli a3, a3, 16
srai a3, a3, 16
#endif
/* Set target register. */
1:
.Lload_w:
extui a4, a4, INSN_T, 4 # extract target register
movi a5, .Lload_table
addx8 a4, a4, a5
@ -295,30 +327,32 @@ ENTRY(fast_unaligned)
mov a13, a3 ; _j .Lexit; .align 8
mov a14, a3 ; _j .Lexit; .align 8
mov a15, a3 ; _j .Lexit; .align 8
#endif
#if XCHAL_UNALIGNED_STORE_EXCEPTION
.Lstore_table:
l32i a3, a2, PT_AREG0; _j 1f; .align 8
mov a3, a1; _j 1f; .align 8 # fishy??
l32i a3, a2, PT_AREG2; _j 1f; .align 8
l32i a3, a2, PT_AREG3; _j 1f; .align 8
l32i a3, a2, PT_AREG4; _j 1f; .align 8
l32i a3, a2, PT_AREG5; _j 1f; .align 8
l32i a3, a2, PT_AREG6; _j 1f; .align 8
l32i a3, a2, PT_AREG7; _j 1f; .align 8
l32i a3, a2, PT_AREG8; _j 1f; .align 8
mov a3, a9 ; _j 1f; .align 8
mov a3, a10 ; _j 1f; .align 8
mov a3, a11 ; _j 1f; .align 8
mov a3, a12 ; _j 1f; .align 8
mov a3, a13 ; _j 1f; .align 8
mov a3, a14 ; _j 1f; .align 8
mov a3, a15 ; _j 1f; .align 8
l32i a3, a2, PT_AREG0; _j .Lstore_w; .align 8
mov a3, a1; _j .Lstore_w; .align 8 # fishy??
l32i a3, a2, PT_AREG2; _j .Lstore_w; .align 8
l32i a3, a2, PT_AREG3; _j .Lstore_w; .align 8
l32i a3, a2, PT_AREG4; _j .Lstore_w; .align 8
l32i a3, a2, PT_AREG5; _j .Lstore_w; .align 8
l32i a3, a2, PT_AREG6; _j .Lstore_w; .align 8
l32i a3, a2, PT_AREG7; _j .Lstore_w; .align 8
l32i a3, a2, PT_AREG8; _j .Lstore_w; .align 8
mov a3, a9 ; _j .Lstore_w; .align 8
mov a3, a10 ; _j .Lstore_w; .align 8
mov a3, a11 ; _j .Lstore_w; .align 8
mov a3, a12 ; _j .Lstore_w; .align 8
mov a3, a13 ; _j .Lstore_w; .align 8
mov a3, a14 ; _j .Lstore_w; .align 8
mov a3, a15 ; _j .Lstore_w; .align 8
#endif
#ifdef ANY_EXCEPTION_HANDLER
/* We cannot handle this exception. */
.extern _kernel_exception
.Linvalid_instruction_load:
.Linvalid_instruction_store:
.Linvalid_instruction:
movi a4, 0
rsr a3, excsave1
@ -326,6 +360,7 @@ ENTRY(fast_unaligned)
/* Restore a4...a8 and SAR, set SP, and jump to default exception. */
l32i a0, a2, PT_SAR
l32i a8, a2, PT_AREG8
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
@ -342,9 +377,11 @@ ENTRY(fast_unaligned)
2: movi a0, _user_exception
jx a0
#endif
#if XCHAL_UNALIGNED_STORE_EXCEPTION
1: # a7: instruction pointer, a4: instruction, a3: value
# a7: instruction pointer, a4: instruction, a3: value
.Lstore_w:
movi a6, 0 # mask: ffffffff:00000000
#if XCHAL_HAVE_DENSITY
@ -361,7 +398,7 @@ ENTRY(fast_unaligned)
extui a5, a4, INSN_OP1, 4 # extract OP1
_beqi a5, OP1_S32I, 1f # jump if 32 bit store
_bnei a5, OP1_S16I, .Linvalid_instruction_store
_bnei a5, OP1_S16I, .Linvalid_instruction
movi a5, -1
__extl a3, a3 # get 16-bit value
@ -406,7 +443,8 @@ ENTRY(fast_unaligned)
#else
s32i a6, a4, 4
#endif
#endif
#ifdef ANY_EXCEPTION_HANDLER
.Lexit:
#if XCHAL_HAVE_LOOPS
rsr a4, lend # check if we reached LEND
@ -434,6 +472,7 @@ ENTRY(fast_unaligned)
/* Restore working register */
l32i a0, a2, PT_SAR
l32i a8, a2, PT_AREG8
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
@ -448,6 +487,59 @@ ENTRY(fast_unaligned)
l32i a2, a2, PT_AREG2
rfe
.align 4
.Lsave_and_load_instruction:
/* Save some working register */
s32i a3, a2, PT_AREG3
s32i a4, a2, PT_AREG4
s32i a5, a2, PT_AREG5
s32i a6, a2, PT_AREG6
s32i a7, a2, PT_AREG7
s32i a8, a2, PT_AREG8
rsr a4, depc
s32i a4, a2, PT_AREG2
rsr a5, sar
s32i a5, a2, PT_SAR
rsr a3, excsave1
movi a4, fast_unaligned_fixup
s32i a4, a3, EXC_TABLE_FIXUP
rsr a8, excvaddr # load unaligned memory address
/* Now, identify one of the following load/store instructions.
*
* The only possible danger of a double exception on the
* following l32i instructions is kernel code in vmalloc
* memory. The processor was just executing at the EPC_1
* address, and indeed, already fetched the instruction. That
* guarantees a TLB mapping, which hasn't been replaced by
* this unaligned exception handler that uses only static TLB
* mappings. However, high-level interrupt handlers might
* modify TLB entries, so for the generic case, we register a
* TABLE_FIXUP handler here, too.
*/
/* a3...a6 saved on stack, a2 = SP */
/* Extract the instruction that caused the unaligned access. */
rsr a7, epc1 # load exception address
movi a3, ~3
and a3, a3, a7 # mask lower bits
l32i a4, a3, 0 # load 2 words
l32i a5, a3, 4
__ssa8 a7
__src_b a4, a4, a5 # a4 has the instruction
ret
#endif
ENDPROC(fast_unaligned)
ENTRY(fast_unaligned_fixup)
@ -459,10 +551,11 @@ ENTRY(fast_unaligned_fixup)
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
l32i a4, a2, PT_AREG4
l32i a4, a2, PT_SAR
l32i a0, a2, PT_AREG2
xsr a0, depc # restore depc and a0
wsr a0, sar
wsr a4, sar
wsr a0, depc # restore depc and a0
l32i a4, a2, PT_AREG4
rsr a0, exccause
s32i a0, a2, PT_DEPC # mark as a regular exception
@ -483,5 +576,4 @@ ENTRY(fast_unaligned_fixup)
jx a0
ENDPROC(fast_unaligned_fixup)
#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
#endif

@ -78,6 +78,7 @@ ENTRY(_mcount)
#error Unsupported Xtensa ABI
#endif
ENDPROC(_mcount)
EXPORT_SYMBOL(_mcount)
ENTRY(ftrace_stub)
abi_entry_default

@ -17,27 +17,28 @@
#include <asm/platform.h>
#include <asm/timex.h>
#define _F(r,f,a,b) \
r __platform_##f a b; \
r platform_##f a __attribute__((weak, alias("__platform_"#f)))
/*
* Default functions that are used if no platform specific function is defined.
* (Please, refer to include/asm-xtensa/platform.h for more information)
* (Please, refer to arch/xtensa/include/asm/platform.h for more information)
*/
_F(void, init, (bp_tag_t *first), { });
_F(void, setup, (char** cmd), { });
_F(void, restart, (void), { while(1); });
_F(void, halt, (void), { while(1); });
_F(void, power_off, (void), { while(1); });
_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
_F(void, heartbeat, (void), { });
void __weak __init platform_init(bp_tag_t *first)
{
}
void __weak __init platform_setup(char **cmd)
{
}
void __weak platform_idle(void)
{
__asm__ __volatile__ ("waiti 0" ::: "memory");
}
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
_F(void, calibrate_ccount, (void),
void __weak platform_calibrate_ccount(void)
{
pr_err("ERROR: Cannot calibrate cpu frequency! Assuming 10MHz.\n");
ccount_freq = 10 * 1000000UL;
});
}
#endif

@ -22,6 +22,7 @@
#include <linux/screen_info.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/reboot.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@ -46,6 +47,7 @@
#include <asm/smp.h>
#include <asm/sysmem.h>
#include <asm/timex.h>
#include <asm/traps.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
@ -241,6 +243,12 @@ void __init early_init_devtree(void *params)
void __init init_arch(bp_tag_t *bp_start)
{
/* Initialize basic exception handling if configuration may need it */
if (IS_ENABLED(CONFIG_KASAN) ||
IS_ENABLED(CONFIG_XTENSA_LOAD_STORE))
early_trap_init();
/* Initialize MMU. */
init_mmu();
@ -522,19 +530,30 @@ void cpu_reset(void)
void machine_restart(char * cmd)
{
platform_restart();
local_irq_disable();
smp_send_stop();
do_kernel_restart(cmd);
pr_err("Reboot failed -- System halted\n");
while (1)
cpu_relax();
}
void machine_halt(void)
{
platform_halt();
while (1);
local_irq_disable();
smp_send_stop();
do_kernel_power_off();
while (1)
cpu_relax();
}
void machine_power_off(void)
{
platform_power_off();
while (1);
local_irq_disable();
smp_send_stop();
do_kernel_power_off();
while (1)
cpu_relax();
}
#ifdef CONFIG_PROC_FS
@ -574,6 +593,12 @@ c_show(struct seq_file *f, void *slot)
# if XCHAL_HAVE_OCD
"ocd "
# endif
#if XCHAL_HAVE_TRAX
"trax "
#endif
#if XCHAL_NUM_PERF_COUNTERS
"perf "
#endif
#endif
#if XCHAL_HAVE_DENSITY
"density "
@ -623,11 +648,13 @@ c_show(struct seq_file *f, void *slot)
seq_printf(f,"physical aregs\t: %d\n"
"misc regs\t: %d\n"
"ibreak\t\t: %d\n"
"dbreak\t\t: %d\n",
"dbreak\t\t: %d\n"
"perf counters\t: %d\n",
XCHAL_NUM_AREGS,
XCHAL_NUM_MISC_REGS,
XCHAL_NUM_IBREAK,
XCHAL_NUM_DBREAK);
XCHAL_NUM_DBREAK,
XCHAL_NUM_PERF_COUNTERS);
/* Interrupt. */

@ -237,8 +237,6 @@ EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
#ifdef CONFIG_FRAME_POINTER
struct return_addr_data {
unsigned long addr;
unsigned skip;
@ -271,5 +269,3 @@ unsigned long return_address(unsigned level)
return r.addr;
}
EXPORT_SYMBOL(return_address);
#endif

@ -121,10 +121,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
set_linux_timer(get_linux_timer());
evt->event_handler(evt);
/* Allow platform to do something useful (Wdog). */
platform_heartbeat();
return IRQ_HANDLED;
}

@ -54,9 +54,10 @@ static void do_interrupt(struct pt_regs *regs);
#if XTENSA_FAKE_NMI
static void do_nmi(struct pt_regs *regs);
#endif
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
static void do_unaligned_user(struct pt_regs *regs);
#ifdef CONFIG_XTENSA_LOAD_STORE
static void do_load_store(struct pt_regs *regs);
#endif
static void do_unaligned_user(struct pt_regs *regs);
static void do_multihit(struct pt_regs *regs);
#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs);
@ -91,7 +92,10 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* EXCCAUSE_LOAD_STORE_ERROR unhandled*/
#ifdef CONFIG_XTENSA_LOAD_STORE
{ EXCCAUSE_LOAD_STORE_ERROR, USER|KRNL, fast_load_store },
{ EXCCAUSE_LOAD_STORE_ERROR, 0, do_load_store },
#endif
{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
#ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
@ -102,9 +106,9 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
#ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
@ -171,6 +175,23 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err)
die(str, regs, err);
}
#ifdef CONFIG_PRINT_USER_CODE_ON_UNHANDLED_EXCEPTION
static inline void dump_user_code(struct pt_regs *regs)
{
char buf[32];
if (copy_from_user(buf, (void __user *)(regs->pc & -16), sizeof(buf)) == 0) {
print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
32, 1, buf, sizeof(buf), false);
}
}
#else
static inline void dump_user_code(struct pt_regs *regs)
{
}
#endif
/*
* Unhandled Exceptions. Kill user task or panic if in kernel space.
*/
@ -186,6 +207,7 @@ void do_unhandled(struct pt_regs *regs)
"\tEXCCAUSE is %ld\n",
current->comm, task_pid_nr(current), regs->pc,
regs->exccause);
dump_user_code(regs);
force_sig(SIGILL);
}
@ -349,6 +371,19 @@ static void do_div0(struct pt_regs *regs)
force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
}
#ifdef CONFIG_XTENSA_LOAD_STORE
static void do_load_store(struct pt_regs *regs)
{
__die_if_kernel("Unhandled load/store exception in kernel",
regs, SIGKILL);
pr_info_ratelimited("Load/store error to %08lx in '%s' (pid = %d, pc = %#010lx)\n",
regs->excvaddr, current->comm,
task_pid_nr(current), regs->pc);
force_sig_fault(SIGBUS, BUS_ADRERR, (void *)regs->excvaddr);
}
#endif
/*
* Handle unaligned memory accesses from user space. Kill task.
*
@ -356,7 +391,6 @@ static void do_div0(struct pt_regs *regs)
* accesses causes from user space.
*/
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
static void do_unaligned_user(struct pt_regs *regs)
{
__die_if_kernel("Unhandled unaligned exception in kernel",
@ -368,7 +402,6 @@ static void do_unaligned_user(struct pt_regs *regs)
task_pid_nr(current), regs->pc);
force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr);
}
#endif
#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs)
@ -534,31 +567,58 @@ static void show_trace(struct task_struct *task, unsigned long *sp,
}
#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 32
#define STACK_DUMP_LINE_SIZE 16
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
struct stack_fragment
{
size_t len;
size_t off;
u8 *sp;
const char *loglvl;
};
static int show_stack_fragment_cb(struct stackframe *frame, void *data)
{
struct stack_fragment *sf = data;
while (sf->off < sf->len) {
u8 line[STACK_DUMP_LINE_SIZE];
size_t line_len = sf->len - sf->off > STACK_DUMP_LINE_SIZE ?
STACK_DUMP_LINE_SIZE : sf->len - sf->off;
bool arrow = sf->off == 0;
if (frame && frame->sp == (unsigned long)(sf->sp + sf->off))
arrow = true;
__memcpy(line, sf->sp + sf->off, line_len);
print_hex_dump(sf->loglvl, arrow ? "> " : " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
line, line_len, false);
sf->off += STACK_DUMP_LINE_SIZE;
if (arrow)
return 0;
}
return 1;
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
size_t len, off = 0;
struct stack_fragment sf;
if (!sp)
sp = stack_pointer(task);
len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
sf.len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
sf.off = 0;
sf.sp = (u8 *)sp;
sf.loglvl = loglvl;
printk("%sStack:\n", loglvl);
while (off < len) {
u8 line[STACK_DUMP_LINE_SIZE];
size_t line_len = len - off > STACK_DUMP_LINE_SIZE ?
STACK_DUMP_LINE_SIZE : len - off;
__memcpy(line, (u8 *)sp + off, line_len);
print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
line, line_len, false);
off += STACK_DUMP_LINE_SIZE;
}
walk_stackframe(sp, show_stack_fragment_cb, &sf);
while (sf.off < sf.len)
show_stack_fragment_cb(NULL, &sf);
show_trace(task, sp, loglvl);
}

@ -13,71 +13,10 @@
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <linux/in6.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/ftrace.h>
#ifdef CONFIG_BLK_DEV_FD
#include <asm/floppy.h>
#endif
#ifdef CONFIG_NET
#include <net/checksum.h>
#endif /* CONFIG_NET */
/*
* String functions
*/
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
#ifdef CONFIG_ARCH_HAS_STRNCPY_FROM_USER
EXPORT_SYMBOL(__strncpy_user);
#endif
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);
#include <asm/pgtable.h>
EXPORT_SYMBOL(empty_zero_page);
/*
* gcc internal math functions
*/
extern long long __ashrdi3(long long, int);
extern long long __ashldi3(long long, int);
extern long long __bswapdi2(long long);
extern int __bswapsi2(int);
extern long long __lshrdi3(long long, int);
extern int __divsi3(int, int);
extern int __modsi3(int, int);
extern int __mulsi3(int, int);
extern unsigned int __udivsi3(unsigned int, unsigned int);
extern unsigned int __umodsi3(unsigned int, unsigned int);
extern unsigned long long __umulsidi3(unsigned int, unsigned int);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__bswapdi2);
EXPORT_SYMBOL(__bswapsi2);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__umulsidi3);
unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
{
BUG();
@ -89,35 +28,3 @@ unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
BUG();
}
EXPORT_SYMBOL(__sync_fetch_and_or_4);
/*
* Networking support
*/
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
/*
* Architecture-specific symbols
*/
EXPORT_SYMBOL(__xtensa_copy_user);
EXPORT_SYMBOL(__invalidate_icache_range);
/*
* Kernel hacking ...
*/
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
// FIXME EXPORT_SYMBOL(screen_info);
#endif
extern long common_exception_return;
EXPORT_SYMBOL(common_exception_return);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
EXPORT_SYMBOL(__invalidate_dcache_range);
#if XCHAL_DCACHE_IS_WRITEBACK
EXPORT_SYMBOL(__flush_dcache_range);
#endif

@ -6,7 +6,8 @@
lib-y += memcopy.o memset.o checksum.o \
ashldi3.o ashrdi3.o bswapdi2.o bswapsi2.o lshrdi3.o \
divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o umulsidi3.o \
usercopy.o strncpy_user.o strnlen_user.o
usercopy.o strnlen_user.o
lib-$(CONFIG_ARCH_HAS_STRNCPY_FROM_USER) += strncpy_user.o
lib-$(CONFIG_PCI) += pci-auto.o
lib-$(CONFIG_KCSAN) += kcsan-stubs.o
KCSAN_SANITIZE_kcsan-stubs.o := n

@ -26,3 +26,4 @@ ENTRY(__ashldi3)
abi_ret_default
ENDPROC(__ashldi3)
EXPORT_SYMBOL(__ashldi3)

@ -26,3 +26,4 @@ ENTRY(__ashrdi3)
abi_ret_default
ENDPROC(__ashrdi3)
EXPORT_SYMBOL(__ashrdi3)

@ -19,3 +19,4 @@ ENTRY(__bswapdi2)
abi_ret_default
ENDPROC(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)

@ -14,3 +14,4 @@ ENTRY(__bswapsi2)
abi_ret_default
ENDPROC(__bswapsi2)
EXPORT_SYMBOL(__bswapsi2)

@ -169,6 +169,7 @@ ENTRY(csum_partial)
j 5b /* branch to handle the remaining byte */
ENDPROC(csum_partial)
EXPORT_SYMBOL(csum_partial)
/*
* Copy from ds while checksumming, otherwise like csum_partial
@ -346,6 +347,7 @@ EX(10f) s8i a8, a3, 1
j 4b /* process the possible trailing odd byte */
ENDPROC(csum_partial_copy_generic)
EXPORT_SYMBOL(csum_partial_copy_generic)
# Exception handler:

@ -72,3 +72,4 @@ ENTRY(__divsi3)
abi_ret_default
ENDPROC(__divsi3)
EXPORT_SYMBOL(__divsi3)

@ -26,3 +26,4 @@ ENTRY(__lshrdi3)
abi_ret_default
ENDPROC(__lshrdi3)
EXPORT_SYMBOL(__lshrdi3)

@ -273,21 +273,8 @@ WEAK(memcpy)
abi_ret_default
ENDPROC(__memcpy)
/*
* void bcopy(const void *src, void *dest, size_t n);
*/
ENTRY(bcopy)
abi_entry_default
# a2=src, a3=dst, a4=len
mov a5, a3
mov a3, a2
mov a2, a5
j .Lmovecommon # go to common code for memmove+bcopy
ENDPROC(bcopy)
EXPORT_SYMBOL(__memcpy)
EXPORT_SYMBOL(memcpy)
/*
* void *memmove(void *dst, const void *src, size_t len);
@ -551,3 +538,5 @@ WEAK(memmove)
abi_ret_default
ENDPROC(__memmove)
EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)

@ -142,6 +142,8 @@ EX(10f) s8i a3, a5, 0
abi_ret_default
ENDPROC(__memset)
EXPORT_SYMBOL(__memset)
EXPORT_SYMBOL(memset)
.section .fixup, "ax"
.align 4

@ -60,6 +60,7 @@ ENTRY(__modsi3)
abi_ret_default
ENDPROC(__modsi3)
EXPORT_SYMBOL(__modsi3)
#if !XCHAL_HAVE_NSA
.section .rodata

@ -131,3 +131,4 @@ ENTRY(__mulsi3)
abi_ret_default
ENDPROC(__mulsi3)
EXPORT_SYMBOL(__mulsi3)

@ -201,6 +201,7 @@ EX(10f) s8i a9, a11, 0
abi_ret_default
ENDPROC(__strncpy_user)
EXPORT_SYMBOL(__strncpy_user)
.section .fixup, "ax"
.align 4

@ -133,6 +133,7 @@ EX(10f) l32i a9, a4, 0 # get word with first two bytes of string
abi_ret_default
ENDPROC(__strnlen_user)
EXPORT_SYMBOL(__strnlen_user)
.section .fixup, "ax"
.align 4

@ -66,3 +66,4 @@ ENTRY(__udivsi3)
abi_ret_default
ENDPROC(__udivsi3)
EXPORT_SYMBOL(__udivsi3)

@ -55,3 +55,4 @@ ENTRY(__umodsi3)
abi_ret_default
ENDPROC(__umodsi3)
EXPORT_SYMBOL(__umodsi3)

@ -228,3 +228,4 @@ ENTRY(__umulsidi3)
#endif /* XCHAL_NO_MUL */
ENDPROC(__umulsidi3)
EXPORT_SYMBOL(__umulsidi3)

@ -283,6 +283,7 @@ EX(10f) s8i a6, a5, 0
abi_ret(STACK_SIZE)
ENDPROC(__xtensa_copy_user)
EXPORT_SYMBOL(__xtensa_copy_user)
.section .fixup, "ax"
.align 4

@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
void __init kasan_early_init(void)
{
@ -31,7 +30,6 @@ void __init kasan_early_init(void)
BUG_ON(!pmd_none(*pmd));
set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
}
early_trap_init();
}
static void __init populate(void *start, void *end)

@ -47,6 +47,7 @@ ENTRY(clear_page)
abi_ret_default
ENDPROC(clear_page)
EXPORT_SYMBOL(clear_page)
/*
* copy_page and copy_user_page are the same for non-cache-aliased configs.
@ -89,6 +90,7 @@ ENTRY(copy_page)
abi_ret_default
ENDPROC(copy_page)
EXPORT_SYMBOL(copy_page)
#ifdef CONFIG_MMU
/*
@ -367,6 +369,7 @@ ENTRY(__invalidate_icache_range)
abi_ret_default
ENDPROC(__invalidate_icache_range)
EXPORT_SYMBOL(__invalidate_icache_range)
/*
* void __flush_invalidate_dcache_range(ulong start, ulong size)
@ -397,6 +400,7 @@ ENTRY(__flush_dcache_range)
abi_ret_default
ENDPROC(__flush_dcache_range)
EXPORT_SYMBOL(__flush_dcache_range)
/*
* void _invalidate_dcache_range(ulong start, ulong size)
@ -411,6 +415,7 @@ ENTRY(__invalidate_dcache_range)
abi_ret_default
ENDPROC(__invalidate_dcache_range)
EXPORT_SYMBOL(__invalidate_dcache_range)
/*
* void _invalidate_icache_all(void)

@ -16,6 +16,7 @@
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/printk.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <asm/platform.h>
@ -24,26 +25,27 @@
#include <platform/simcall.h>
void platform_halt(void)
{
pr_info(" ** Called platform_halt() **\n");
simc_exit(0);
}
void platform_power_off(void)
static int iss_power_off(struct sys_off_data *unused)
{
pr_info(" ** Called platform_power_off() **\n");
simc_exit(0);
return NOTIFY_DONE;
}
void platform_restart(void)
static int iss_restart(struct notifier_block *this,
unsigned long event, void *ptr)
{
/* Flush and reset the mmu, simulate a processor reset, and
* jump to the reset vector. */
cpu_reset();
/* control never gets here */
return NOTIFY_DONE;
}
static struct notifier_block iss_restart_block = {
.notifier_call = iss_restart,
};
static int
iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
@ -82,4 +84,8 @@ void __init platform_setup(char **p_cmdline)
}
atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
register_restart_handler(&iss_restart_block);
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_PLATFORM,
iss_power_off, NULL);
}

@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/timer.h>
#include <asm/processor.h>
#include <asm/platform.h>
@ -41,51 +42,46 @@ static void led_print (int f, char *s)
break;
}
void platform_halt(void)
{
led_print (0, " HALT ");
local_irq_disable();
while (1);
}
void platform_power_off(void)
static int xt2000_power_off(struct sys_off_data *unused)
{
led_print (0, "POWEROFF");
local_irq_disable();
while (1);
return NOTIFY_DONE;
}
void platform_restart(void)
static int xt2000_restart(struct notifier_block *this,
unsigned long event, void *ptr)
{
/* Flush and reset the mmu, simulate a processor reset, and
* jump to the reset vector. */
cpu_reset();
/* control never gets here */
return NOTIFY_DONE;
}
static struct notifier_block xt2000_restart_block = {
.notifier_call = xt2000_restart,
};
void __init platform_setup(char** cmdline)
{
led_print (0, "LINUX ");
}
/* early initialization */
void __init platform_init(bp_tag_t *first)
{
}
/* Heartbeat. Let the LED blink. */
void platform_heartbeat(void)
{
static int i, t;
static void xt2000_heartbeat(struct timer_list *unused);
if (--t < 0)
{
t = 59;
led_print(7, i ? ".": " ");
i ^= 1;
}
static DEFINE_TIMER(heartbeat_timer, xt2000_heartbeat);
static void xt2000_heartbeat(struct timer_list *unused)
{
static int i;
led_print(7, i ? "." : " ");
i ^= 1;
mod_timer(&heartbeat_timer, jiffies + HZ / 2);
}
//#define RS_TABLE_SIZE 2
@ -143,7 +139,11 @@ static int __init xt2000_setup_devinit(void)
{
platform_device_register(&xt2000_serial8250_device);
platform_device_register(&xt2000_sonic_device);
mod_timer(&heartbeat_timer, jiffies + HZ / 2);
register_restart_handler(&xt2000_restart_block);
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_DEFAULT,
xt2000_power_off, NULL);
return 0;
}

@ -33,23 +33,17 @@
#include <platform/lcd.h>
#include <platform/hardware.h>
void platform_halt(void)
{
lcd_disp_at_pos(" HALT ", 0);
local_irq_disable();
while (1)
cpu_relax();
}
void platform_power_off(void)
static int xtfpga_power_off(struct sys_off_data *unused)
{
lcd_disp_at_pos("POWEROFF", 0);
local_irq_disable();
while (1)
cpu_relax();
return NOTIFY_DONE;
}
void platform_restart(void)
static int xtfpga_restart(struct notifier_block *this,
unsigned long event, void *ptr)
{
/* Try software reset first. */
WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
@ -58,9 +52,14 @@ void platform_restart(void)
* simulate a processor reset, and jump to the reset vector.
*/
cpu_reset();
/* control never gets here */
return NOTIFY_DONE;
}
static struct notifier_block xtfpga_restart_block = {
.notifier_call = xtfpga_restart,
};
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
void __init platform_calibrate_ccount(void)
@ -70,6 +69,14 @@ void __init platform_calibrate_ccount(void)
#endif
static void __init xtfpga_register_handlers(void)
{
register_restart_handler(&xtfpga_restart_block);
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_DEFAULT,
xtfpga_power_off, NULL);
}
#ifdef CONFIG_USE_OF
static void __init xtfpga_clk_setup(struct device_node *np)
@ -134,6 +141,9 @@ static int __init machine_setup(void)
if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
update_local_mac(eth);
of_node_put(eth);
xtfpga_register_handlers();
return 0;
}
arch_initcall(machine_setup);
@ -281,6 +291,8 @@ static int __init xtavnet_init(void)
pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR;
xtfpga_register_handlers();
return 0;
}
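Taken together, the platform_* cleanup above moves xtensa's three platforms (iss, xt2000, xtfpga) from the old weak platform_restart()/platform_halt()/platform_power_off() hooks to the generic reboot and sys-off infrastructure, which machine_restart(), machine_halt() and machine_power_off() now invoke through do_kernel_restart() and do_kernel_power_off(). A minimal sketch of that pattern for a hypothetical platform (all myplat_* names and the chosen priority are illustrative, not part of the patch):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int myplat_power_off(struct sys_off_data *unused)
{
	/* put the board into its lowest power state here */
	return NOTIFY_DONE;
}

static int myplat_restart(struct notifier_block *this,
			  unsigned long event, void *ptr)
{
	/* reset the SoC here */
	return NOTIFY_DONE;
}

static struct notifier_block myplat_restart_block = {
	.notifier_call = myplat_restart,
};

static int __init myplat_register_handlers(void)
{
	register_restart_handler(&myplat_restart_block);
	register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
				 myplat_power_off, NULL);
	return 0;
}
arch_initcall(myplat_register_handlers);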