ARM development updates for 5.18:
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "Updates for IRQ stacks and virtually mapped stack support, and
  ftrace:

   - Support for IRQ and vmap'ed stacks

     This covers all the work related to implementing IRQ stacks and
     vmap'ed stacks for all 32-bit ARM systems that are currently
     supported by the Linux kernel, including RiscPC and Footbridge.

     It has been submitted for review in four different waves:

      - IRQ stacks support for v7 SMP systems [0]

      - vmap'ed stacks support for v7 SMP systems [1]

      - extending support for both IRQ stacks and vmap'ed stacks for
        all remaining configurations, including v6/v7 SMP multiplatform
        kernels and uniprocessor configurations including v7-M [2]

      - fixes and updates in [3]

   - ftrace fixes and cleanups

     Make all flavors of ftrace available on all builds, regardless of
     ISA choice, unwinder choice or compiler [4]:

      - use ADD not POP where possible

      - fix a couple of Thumb2 related issues

      - enable HAVE_FUNCTION_GRAPH_FP_TEST for robustness

      - enable the graph tracer with the EABI unwinder

      - avoid clobbering frame pointer registers to make Clang happy

   - Fixes for the above"

[0] https://lore.kernel.org/linux-arm-kernel/20211115084732.3704393-1-ardb@kernel.org/
[1] https://lore.kernel.org/linux-arm-kernel/20211122092816.2865873-1-ardb@kernel.org/
[2] https://lore.kernel.org/linux-arm-kernel/20211206164659.1495084-1-ardb@kernel.org/
[3] https://lore.kernel.org/linux-arm-kernel/20220124174744.1054712-1-ardb@kernel.org/
[4] https://lore.kernel.org/linux-arm-kernel/20220203082204.1176734-1-ardb@kernel.org/

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (62 commits)
  ARM: fix building NOMMU ARMv4/v5 kernels
  ARM: unwind: only permit stack switch when unwinding call_with_stack()
  ARM: Revert "unwind: dump exception stack from calling frame"
  ARM: entry: fix unwinder problems caused by IRQ stacks
  ARM: unwind: set frame.pc correctly for current-thread unwinding
  ARM: 9184/1: return_address: disable again for CONFIG_ARM_UNWIND=y
  ARM: 9183/1: unwind: avoid spurious warnings on bogus code addresses
  Revert "ARM: 9144/1: forbid ftrace with clang and thumb2_kernel"
  ARM: mach-bcm: disable ftrace in SMC invocation routines
  ARM: cacheflush: avoid clobbering the frame pointer
  ARM: kprobes: treat R7 as the frame pointer register in Thumb2 builds
  ARM: ftrace: enable the graph tracer with the EABI unwinder
  ARM: unwind: track location of LR value in stack frame
  ARM: ftrace: enable HAVE_FUNCTION_GRAPH_FP_TEST
  ARM: ftrace: avoid unnecessary literal loads
  ARM: ftrace: avoid redundant loads or clobbering IP
  ARM: ftrace: use trampolines to keep .init.text in branching range
  ARM: ftrace: use ADD not POP to counter PUSH at entry
  ARM: ftrace: ensure that ADR takes the Thumb bit into account
  ARM: make get_current() and __my_cpu_offset() __always_inline
  ...
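For readers unfamiliar with vmap'ed stacks, here is a minimal C sketch of the
idea, modelled on the generic CONFIG_VMAP_STACK allocation path in
kernel/fork.c rather than on code from this merge (the function name below is
hypothetical). Thread stacks come from the vmalloc area, where allocations are
separated by unmapped guard pages, so a runaway stack takes a fault instead of
silently corrupting a neighbouring allocation; the ARM entry code added here
then reports the overflow from a separate per-CPU overflow stack.

    #include <linux/vmalloc.h>
    #include <linux/thread_info.h>

    /*
     * Illustrative sketch only: allocate a thread stack from the
     * vmalloc area, THREAD_ALIGN-aligned, bounded by guard pages.
     */
    static void *alloc_vmapped_stack(int node)
    {
            return __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
                                        VMALLOC_START, VMALLOC_END,
                                        THREADINFO_GFP, PAGE_KERNEL, 0,
                                        node, __builtin_return_address(0));
    }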
commit 9c0e6a89b5

@@ -60,6 +60,7 @@ config ARM
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -94,8 +95,8 @@ config ARM
select HAVE_EXIT_THREAD
select HAVE_FAST_GUP if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !(THUMB2_KERNEL && CC_IS_CLANG)
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IRQ_TIME_ACCOUNTING
@@ -129,7 +130,8 @@ config ARM
select PERF_USE_VMALLOC
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select THREAD_INFO_IN_TASK if CURRENT_POINTER_IN_TPIDRURO
select THREAD_INFO_IN_TASK
select HAVE_ARCH_VMAP_STACK if MMU && ARM_HAS_GROUP_RELOCS
select TRACE_IRQFLAGS_SUPPORT if !CPU_V7M
# Above selects are sorted alphabetically; please add new ones
# according to that. Thanks.
@@ -141,6 +143,17 @@ config ARM
Europe. There is an ARM Linux project with a web page at
<http://www.arm.linux.org.uk/>.

config ARM_HAS_GROUP_RELOCS
def_bool y
depends on !LD_IS_LLD || LLD_VERSION >= 140000
depends on !COMPILE_TEST
help
Whether or not to use R_ARM_ALU_PC_Gn or R_ARM_LDR_PC_Gn group
relocations, which have been around for a long time, but were not
supported in LLD until version 14. The combined range is -/+ 256 MiB,
which is usually sufficient, but not for allyesconfig, so we disable
this feature when doing compile testing.

config ARM_HAS_SG_CHAIN
bool

@@ -229,9 +242,6 @@ config GENERIC_ISA_DMA
config FIQ
bool

config NEED_RET_TO_USER
bool

config ARCH_MTD_XIP
bool

@@ -325,7 +335,6 @@ config ARCH_MULTIPLATFORM
select AUTO_ZRELADDR
select TIMER_OF
select COMMON_CLK
select GENERIC_IRQ_MULTI_HANDLER
select HAVE_PCI
select PCI_DOMAINS_GENERIC if PCI
select SPARSE_IRQ
@@ -349,7 +358,6 @@ config ARCH_EP93XX
select ARM_AMBA
imply ARM_PATCH_PHYS_VIRT
select ARM_VIC
select GENERIC_IRQ_MULTI_HANDLER
select AUTO_ZRELADDR
select CLKSRC_MMIO
select CPU_ARM920T
@@ -374,7 +382,6 @@ config ARCH_IOP32X
select CPU_XSCALE
select GPIO_IOP
select GPIOLIB
select NEED_RET_TO_USER
select FORCE_PCI
select PLAT_IOP
help
@@ -388,7 +395,6 @@ config ARCH_IXP4XX
select ARCH_SUPPORTS_BIG_ENDIAN
select CPU_XSCALE
select DMABOUNCE if PCI
select GENERIC_IRQ_MULTI_HANDLER
select GPIO_IXP4XX
select GPIOLIB
select HAVE_PCI
@@ -404,7 +410,6 @@ config ARCH_IXP4XX
config ARCH_DOVE
bool "Marvell Dove"
select CPU_PJ4
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_PCI
select MVEBU_MBUS
@@ -427,7 +432,6 @@ config ARCH_PXA
select CLKSRC_MMIO
select TIMER_OF
select CPU_XSCALE if !CPU_XSC3
select GENERIC_IRQ_MULTI_HANDLER
select GPIO_PXA
select GPIOLIB
select IRQ_DOMAIN
@@ -466,7 +470,6 @@ config ARCH_SA1100
select COMMON_CLK
select CPU_FREQ
select CPU_SA1100
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select IRQ_DOMAIN
select ISA
@@ -481,7 +484,6 @@ config ARCH_S3C24XX
select CLKSRC_SAMSUNG_PWM
select GPIO_SAMSUNG
select GPIOLIB
select GENERIC_IRQ_MULTI_HANDLER
select NEED_MACH_IO_H
select S3C2410_WATCHDOG
select SAMSUNG_ATAGS
@@ -499,7 +501,6 @@ config ARCH_OMAP1
select ARCH_OMAP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_LEGACY_CLK
select IRQ_DOMAIN
@@ -1166,7 +1167,12 @@ config SMP_ON_UP

config CURRENT_POINTER_IN_TPIDRURO
def_bool y
depends on SMP && CPU_32v6K && !CPU_V6
depends on CPU_32v6K && !CPU_V6

config IRQSTACKS
def_bool y
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_SOFTIRQ_ON_OWN_STACK

config ARM_CPU_TOPOLOGY
bool "Support cpu topology definition"
@@ -1607,10 +1613,14 @@ config XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.

config CC_HAVE_STACKPROTECTOR_TLS
def_bool $(cc-option,-mtp=cp15 -mstack-protector-guard=tls -mstack-protector-guard-offset=0)

config STACKPROTECTOR_PER_TASK
bool "Use a unique stack canary value for each task"
depends on GCC_PLUGINS && STACKPROTECTOR && THREAD_INFO_IN_TASK && !XIP_DEFLATED_DATA
select GCC_PLUGIN_ARM_SSP_PER_TASK
depends on STACKPROTECTOR && CURRENT_POINTER_IN_TPIDRURO && !XIP_DEFLATED_DATA
depends on GCC_PLUGINS || CC_HAVE_STACKPROTECTOR_TLS
select GCC_PLUGIN_ARM_SSP_PER_TASK if !CC_HAVE_STACKPROTECTOR_TLS
default y
help
Due to the fact that GCC uses an ordinary symbol reference from

@@ -65,7 +65,7 @@ config UNWINDER_FRAME_POINTER

config UNWINDER_ARM
bool "ARM EABI stack unwinder"
depends on AEABI && !FUNCTION_GRAPH_TRACER
depends on AEABI
select ARM_UNWIND
help
This option enables stack unwinding support in the kernel

@@ -275,6 +275,14 @@ endif

ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
ifeq ($(CONFIG_CC_HAVE_STACKPROTECTOR_TLS),y)
stack_protector_prepare: prepare0
$(eval KBUILD_CFLAGS += \
-mstack-protector-guard=tls \
-mstack-protector-guard-offset=$(shell \
awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}'\
include/generated/asm-offsets.h))
else
stack_protector_prepare: prepare0
$(eval SSP_PLUGIN_CFLAGS := \
-fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \
@@ -283,6 +291,7 @@ stack_protector_prepare: prepare0
$(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS))
$(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS))
endif
endif

all: $(notdir $(KBUILD_IMAGE))

@@ -92,17 +92,13 @@ ifeq ($(CONFIG_USE_OF),y)
OBJS += $(libfdt_objs) fdt_check_mem_start.o
endif

# -fstack-protector-strong triggers protection checks in this code,
# but it is being used too early to link to meaningful stack_chk logic.
$(foreach o, $(libfdt_objs) atags_to_fdt.o fdt_check_mem_start.o, \
$(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector))

targets := vmlinux vmlinux.lds piggy_data piggy.o \
head.o $(OBJS)

KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING

ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
-I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
ccflags-remove-$(CONFIG_FUNCTION_TRACER) += -pg
asflags-y := -DZIMAGE

@@ -128,13 +128,6 @@ asmlinkage void __div0(void)
error("Attempting division by 0!");
}

const unsigned long __stack_chk_guard = 0x000a0dff;

void __stack_chk_fail(void)
{
error("stack-protector: Kernel stack is corrupted\n");
}

extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x));

@@ -86,6 +86,10 @@

#define IMM12_MASK 0xfff

/* the frame pointer used for stack unwinding */
ARM( fpreg .req r11 )
THUMB( fpreg .req r7 )

/*
* Enable and disable interrupts
*/
@@ -209,43 +213,12 @@
.endm
.endr

.macro get_current, rd
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
#else
get_thread_info \rd
ldr \rd, [\rd, #TI_TASK]
#endif
.endm

.macro set_current, rn
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
#endif
.endm

.macro reload_current, t1:req, t2:req
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
adr_l \t1, __entry_task @ get __entry_task base address
mrc p15, 0, \t2, c13, c0, 4 @ get per-CPU offset
ldr \t1, [\t1, \t2] @ load variable
mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
#endif
.endm

/*
* Get current thread_info.
*/
.macro get_thread_info, rd
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* thread_info is the first member of struct task_struct */
get_current \rd
#else
ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT )
THUMB( mov \rd, sp )
THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT )
mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
#endif
.endm

/*
@@ -320,6 +293,80 @@
#define ALT_UP_B(label) b label
#endif

/*
* this_cpu_offset - load the per-CPU offset of this CPU into
* register 'rd'
*/
.macro this_cpu_offset, rd:req
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, \rd, c13, c0, 4)
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L1_\@)
.L0_\@:
.subsection 1
.L1_\@: ldr_va \rd, __per_cpu_offset
b .L0_\@
.previous
#endif
#else
mov \rd, #0
#endif
.endm

/*
* set_current - store the task pointer of this CPU's current task
*/
.macro set_current, rn:req, tmp:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998: mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L0_\@)
.subsection 1
.L0_\@: str_va \rn, __current, \tmp
b .L1_\@
.previous
.L1_\@:
#endif
#else
str_va \rn, __current, \tmp
#endif
.endm

/*
* get_current - load the task pointer of this CPU's current task
*/
.macro get_current, rd:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998: mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L0_\@)
.subsection 1
.L0_\@: ldr_va \rd, __current
b .L1_\@
.previous
.L1_\@:
#endif
#else
ldr_va \rd, __current
#endif
.endm

/*
* reload_current - reload the task pointer of this CPU's current task
* into the TLS register
*/
.macro reload_current, t1:req, t2:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L0_\@)
#endif
ldr_this_cpu \t1, __entry_task, \t1, \t2
mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
.L0_\@:
#endif
.endm

/*
* Instruction barrier
*/
@@ -576,12 +623,12 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
/*
* mov_l - move a constant value or [relocated] address into a register
*/
.macro mov_l, dst:req, imm:req
.macro mov_l, dst:req, imm:req, cond
.if __LINUX_ARM_ARCH__ < 7
ldr \dst, =\imm
ldr\cond \dst, =\imm
.else
movw \dst, #:lower16:\imm
movt \dst, #:upper16:\imm
movw\cond \dst, #:lower16:\imm
movt\cond \dst, #:upper16:\imm
.endif
.endm

@@ -619,6 +666,78 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
__adldst_l str, \src, \sym, \tmp, \cond
.endm

.macro __ldst_va, op, reg, tmp, sym, cond
#if __LINUX_ARM_ARCH__ >= 7 || \
!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
mov_l \tmp, \sym, \cond
\op\cond \reg, [\tmp]
#else
/*
* Avoid a literal load, by emitting a sequence of ADD/LDR instructions
* with the appropriate relocations. The combined sequence has a range
* of -/+ 256 MiB, which should be sufficient for the core kernel and
* for modules loaded into the module region.
*/
.globl \sym
.reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
.reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
.reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@: sub\cond \tmp, pc, #8
.L1_\@: sub\cond \tmp, \tmp, #4
.L2_\@: \op\cond \reg, [\tmp, #0]
#endif
.endm

/*
* ldr_va - load a 32-bit word from the virtual address of \sym
*/
.macro ldr_va, rd:req, sym:req, cond
__ldst_va ldr, \rd, \rd, \sym, \cond
.endm

/*
* str_va - store a 32-bit word to the virtual address of \sym
*/
.macro str_va, rn:req, sym:req, tmp:req, cond
__ldst_va str, \rn, \tmp, \sym, \cond
.endm

/*
* ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
* without using a temp register. Supported in ARM mode
* only.
*/
.macro ldr_this_cpu_armv6, rd:req, sym:req
this_cpu_offset \rd
.globl \sym
.reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
.reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
.reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
add \rd, \rd, pc
.L0_\@: sub \rd, \rd, #4
.L1_\@: sub \rd, \rd, #0
.L2_\@: ldr \rd, [\rd, #4]
.endm

/*
* ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
* into register 'rd', which may be the stack pointer,
* using 't1' and 't2' as general temp registers. These
* are permitted to overlap with 'rd' if != sp
*/
.macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
#if __LINUX_ARM_ARCH__ >= 7 || \
!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
this_cpu_offset \t1
mov_l \t2, \sym
ldr \rd, [\t1, \t2]
#else
ldr_this_cpu_armv6 \rd, \sym
#endif
.endm

/*
* rev_l - byte-swap a 32-bit value
*
@@ -636,4 +755,19 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.endif
.endm

/*
* bl_r - branch and link to register
*
* @dst: target to branch to
* @c: conditional opcode suffix
*/
.macro bl_r, dst:req, c
.if __LINUX_ARM_ARCH__ < 6
mov\c lr, pc
mov\c pc, \dst
.else
blx\c \dst
.endif
.endm

#endif /* __ASM_ASSEMBLER_H__ */

@@ -445,15 +445,10 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
* however some exceptions may exist. Caveat emptor.
*
* - The clobber list is dictated by the call to v7_flush_dcache_*.
* fp is preserved to the stack explicitly prior disabling the cache
* since adding it to the clobber list is incompatible with having
* CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
* trampoline are inserted by the linker and to keep sp 64-bit aligned.
*/
#define v7_exit_coherency_flush(level) \
asm volatile( \
".arch armv7-a \n\t" \
"stmfd sp!, {fp, ip} \n\t" \
"mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
"bic r0, r0, #"__stringify(CR_C)" \n\t" \
"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
@@ -463,10 +458,9 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
"bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
"isb \n\t" \
"dsb \n\t" \
"ldmfd sp!, {fp, ip}" \
: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
"r9","r10","lr","memory" )
"dsb" \
: : : "r0","r1","r2","r3","r4","r5","r6", \
"r9","r10","ip","lr","memory" )

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);

@@ -8,25 +8,18 @@
#define _ASM_ARM_CURRENT_H

#ifndef __ASSEMBLY__
#include <asm/insn.h>

struct task_struct;

static inline void set_current(struct task_struct *cur)
{
if (!IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO))
return;
extern struct task_struct *__current;

/* Set TPIDRURO */
asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
}

#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO

static inline struct task_struct *get_current(void)
static __always_inline __attribute_const__ struct task_struct *get_current(void)
{
struct task_struct *cur;

#if __has_builtin(__builtin_thread_pointer) && \
defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) && \
!(defined(CONFIG_THUMB2_KERNEL) && \
defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 130001)
/*
@@ -39,16 +32,39 @@ static inline struct task_struct *get_current(void)
* https://github.com/ClangBuiltLinux/linux/issues/1485
*/
cur = __builtin_thread_pointer();
#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
asm("0: mrc p15, 0, %0, c13, c0, 3 \n\t"
#ifdef CONFIG_CPU_V6
"1: \n\t"
" .subsection 1 \n\t"
#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
!(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
"2: " LOAD_SYM_ARMV6(%0, __current) " \n\t"
" b 1b \n\t"
#else
asm("mrc p15, 0, %0, c13, c0, 3" : "=r"(cur));
"2: ldr %0, 3f \n\t"
" ldr %0, [%0] \n\t"
" b 1b \n\t"
"3: .long __current \n\t"
#endif
" .previous \n\t"
" .pushsection \".alt.smp.init\", \"a\" \n\t"
" .long 0b - . \n\t"
" b . + (2b - 0b) \n\t"
" .popsection \n\t"
#endif
: "=r"(cur));
#elif __LINUX_ARM_ARCH__ >= 7 || \
!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
cur = __current;
#else
asm(LOAD_SYM_ARMV6(%0, __current) : "=r"(cur));
#endif
return cur;
}

#define current get_current()
#else
#include <asm-generic/current.h>
#endif /* CONFIG_CURRENT_POINTER_IN_TPIDRURO */

#endif /* __ASSEMBLY__ */

@@ -61,6 +61,9 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_MOVT_ABS 44
#define R_ARM_MOVW_PREL_NC 45
#define R_ARM_MOVT_PREL 46
#define R_ARM_ALU_PC_G0_NC 57
#define R_ARM_ALU_PC_G1_NC 59
#define R_ARM_LDR_PC_G2 63

#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30

@@ -1,40 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/assembler.h>

/*
* Interrupt handling. Preserves r7, r8, r9
*/
.macro arch_irq_handler_default
get_irqnr_preamble r6, lr
1: get_irqnr_and_base r0, r2, r6, lr
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
badrne lr, 1b
bne asm_do_IRQ

#ifdef CONFIG_SMP
/*
* XXX
*
* this macro assumes that irqstat (r2) and base (r6) are
* preserved from get_irqnr_and_base above
*/
ALT_SMP(test_for_ipi r0, r2, r6, lr)
ALT_UP_B(9997f)
movne r1, sp
badrne lr, 1b
bne do_IPI
#endif
9997:
.endm

.macro arch_irq_handler, symbol_name
.align 5
.global \symbol_name
\symbol_name:
mov r8, lr
arch_irq_handler_default
ret r8
.endm

@@ -2,6 +2,8 @@
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE

#define HAVE_FUNCTION_GRAPH_FP_TEST

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
@@ -48,7 +50,7 @@ void *return_address(unsigned int);

static inline void *return_address(unsigned int level)
{
return NULL;
return NULL;
}

#endif

@@ -1,131 +0,0 @@
/*
* arch/arm/include/asm/hardware/entry-macro-iomd.S
*
* Low-level IRQ helper macros for IOC/IOMD based platforms
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/

/* IOC / IOMD based hardware */
#include <asm/hardware/iomd.h>

.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldrb \irqstat, [\base, #IOMD_IRQREQB] @ get high priority first
ldr \tmp, =irq_prio_h
teq \irqstat, #0
#ifdef IOMD_BASE
ldrbeq \irqstat, [\base, #IOMD_DMAREQ] @ get dma
addeq \tmp, \tmp, #256 @ irq_prio_h table size
teqeq \irqstat, #0
bne 2406f
#endif
ldrbeq \irqstat, [\base, #IOMD_IRQREQA] @ get low priority
addeq \tmp, \tmp, #256 @ irq_prio_d table size
teqeq \irqstat, #0
#ifdef IOMD_IRQREQC
ldrbeq \irqstat, [\base, #IOMD_IRQREQC]
addeq \tmp, \tmp, #256 @ irq_prio_l table size
teqeq \irqstat, #0
#endif
#ifdef IOMD_IRQREQD
ldrbeq \irqstat, [\base, #IOMD_IRQREQD]
addeq \tmp, \tmp, #256 @ irq_prio_lc table size
teqeq \irqstat, #0
#endif
2406: ldrbne \irqnr, [\tmp, \irqstat] @ get IRQ number
.endm

/*
* Interrupt table (incorporates priority). Please note that we
* rely on the order of these tables (see above code).
*/
.align 5
irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
.byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
.byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
#ifdef IOMD_BASE
irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
#endif
irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
.byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
.byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
#ifdef IOMD_IRQREQC
irq_prio_lc: .byte 24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
.byte 28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
.byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
#endif
#ifdef IOMD_IRQREQD
irq_prio_ld: .byte 40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
.byte 44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
.byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
#endif

@@ -2,6 +2,23 @@
#ifndef __ASM_ARM_INSN_H
#define __ASM_ARM_INSN_H

#include <linux/types.h>

/*
* Avoid a literal load by emitting a sequence of ADD/LDR instructions with the
* appropriate relocations. The combined sequence has a range of -/+ 256 MiB,
* which should be sufficient for the core kernel as well as modules loaded
* into the module region. (Not supported by LLD before release 14)
*/
#define LOAD_SYM_ARMV6(reg, sym) \
" .globl " #sym " \n\t" \
" .reloc 10f, R_ARM_ALU_PC_G0_NC, " #sym " \n\t" \
" .reloc 11f, R_ARM_ALU_PC_G1_NC, " #sym " \n\t" \
" .reloc 12f, R_ARM_LDR_PC_G2, " #sym " \n\t" \
"10: sub " #reg ", pc, #8 \n\t" \
"11: sub " #reg ", " #reg ", #4 \n\t" \
"12: ldr " #reg ", [" #reg ", #0] \n\t"

static inline unsigned long
arm_gen_nop(void)
{

@@ -26,7 +26,6 @@
struct irqaction;
struct pt_regs;

extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);

@@ -56,9 +56,7 @@ struct machine_desc {
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
void (*restart)(enum reboot_mode, const char *);
};

@@ -10,7 +10,7 @@ typedef struct {
#else
int switch_pending;
#endif
unsigned int vmalloc_seq;
atomic_t vmalloc_seq;
unsigned long sigpage;
#ifdef CONFIG_VDSO
unsigned long vdso;

@@ -23,6 +23,16 @@

void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_MMU
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
unlikely(atomic_read(&mm->context.vmalloc_seq) !=
atomic_read(&init_mm.context.vmalloc_seq)))
__check_vmalloc_seq(mm);
}
#endif

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
@@ -52,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
__check_vmalloc_seq(mm);
check_vmalloc_seq(mm);

if (irqs_disabled())
/*
@@ -129,6 +138,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}

#ifdef CONFIG_VMAP_STACK
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
if (mm != &init_mm)
check_vmalloc_seq(mm);
}
#define enter_lazy_tlb enter_lazy_tlb
#endif

#include <asm-generic/mmu_context.h>

#endif

@@ -147,6 +147,9 @@ extern void copy_page(void *to, const void *from);
#include <asm/pgtable-3level-types.h>
#else
#include <asm/pgtable-2level-types.h>
#ifdef CONFIG_VMAP_STACK
#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
#endif
#endif

#endif /* CONFIG_MMU */

@@ -5,20 +5,27 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

#include <asm/insn.h>

register unsigned long current_stack_pointer asm ("sp");

/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
*/
#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
#ifdef CONFIG_SMP
static inline void set_my_cpu_offset(unsigned long off)
{
extern unsigned int smp_on_up;

if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
return;

/* Set TPIDRPRW */
asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}

static inline unsigned long __my_cpu_offset(void)
static __always_inline unsigned long __my_cpu_offset(void)
{
unsigned long off;

@@ -27,8 +34,28 @@ static inline unsigned long __my_cpu_offset(void)
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
: "Q" (*(const unsigned long *)current_stack_pointer));
asm("0: mrc p15, 0, %0, c13, c0, 4 \n\t"
#ifdef CONFIG_CPU_V6
"1: \n\t"
" .subsection 1 \n\t"
#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
!(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
"2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
" b 1b \n\t"
#else
"2: ldr %0, 3f \n\t"
" ldr %0, [%0] \n\t"
" b 1b \n\t"
"3: .long __per_cpu_offset \n\t"
#endif
" .previous \n\t"
" .pushsection \".alt.smp.init\", \"a\" \n\t"
" .long 0b - . \n\t"
" b . + (2b - 0b) \n\t"
" .popsection \n\t"
#endif
: "=r" (off)
: "Q" (*(const unsigned long *)current_stack_pointer));

return off;
}

@@ -24,11 +24,6 @@ struct seq_file;
*/
extern void show_ipi_list(struct seq_file *, int);

/*
* Called from assembly code, this handles an IPI.
*/
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);

/*
* Called from C code, this handles an IPI.
*/

@@ -14,6 +14,9 @@ struct stackframe {
unsigned long sp;
unsigned long lr;
unsigned long pc;

/* address of the LR value on the stack */
unsigned long *lr_addr;
#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
struct task_struct *tsk;
@@ -36,5 +39,7 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
extern int unwind_frame(struct stackframe *frame);
extern void walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
extern void dump_mem(const char *lvl, const char *str, unsigned long bottom,
unsigned long top);

#endif /* __ASM_STACKTRACE_H */

@@ -3,6 +3,7 @@
#define __ASM_ARM_SWITCH_TO_H

#include <linux/thread_info.h>
#include <asm/smp_plat.h>

/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
@@ -26,7 +27,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO)) \
if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || is_smp()) \
__this_cpu_write(__entry_task, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)

@@ -25,6 +25,14 @@
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP (THREAD_SIZE - 8)

#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN (2 * THREAD_SIZE)
#else
#define THREAD_ALIGN THREAD_SIZE
#endif

#define OVERFLOW_STACK_SIZE SZ_4K

#ifndef __ASSEMBLY__

struct task_struct;
@@ -54,9 +62,6 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct task_struct *task; /* main task structure */
#endif
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
struct cpu_context_save cpu_context; /* cpu context */
@@ -72,39 +77,15 @@ struct thread_info {

#define INIT_THREAD_INFO(tsk) \
{ \
INIT_THREAD_INFO_TASK(tsk) \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
#define INIT_THREAD_INFO_TASK(tsk)

static inline struct task_struct *thread_task(struct thread_info* ti)
{
return (struct task_struct *)ti;
}

#else
#define INIT_THREAD_INFO_TASK(tsk) .task = &(tsk),

static inline struct task_struct *thread_task(struct thread_info* ti)
{
return ti->task;
}

/*
* how to get the thread information struct from C
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;

static inline struct thread_info *current_thread_info(void)
{
return (struct thread_info *)
(current_stack_pointer & ~(THREAD_SIZE - 1));
}
#endif

#define thread_saved_pc(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_sp(tsk) \

@@ -18,21 +18,32 @@
.endm

.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
ldr \tmp1, =elf_hwcap
ldr \tmp1, [\tmp1, #0]
#ifdef CONFIG_SMP
ALT_SMP(nop)
ALT_UP_B(.L0_\@)
.subsection 1
#endif
.L0_\@:
ldr_va \tmp1, elf_hwcap
mov \tmp2, #0xffff0fff
tst \tmp1, #HWCAP_TLS @ hardware TLS available?
streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register
strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
beq .L2_\@
mcr p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
#ifdef CONFIG_SMP
b .L1_\@
.previous
#endif
.L1_\@: switch_tls_v6k \base, \tp, \tpuser, \tmp1, \tmp2
.L2_\@:
.endm

.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
mov \tmp1, #0xffff0fff
str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0
.endm
#else
#include <asm/smp_plat.h>
#endif

#ifdef CONFIG_TLS_REG_EMUL
@@ -43,7 +54,7 @@
#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
#define defer_tls_reg_update 0
#define defer_tls_reg_update is_smp()
#define switch_tls switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
@@ -81,11 +92,11 @@ static inline void set_tls(unsigned long val)
*/
barrier();

if (!tls_emu && !defer_tls_reg_update) {
if (has_tls_reg) {
if (!tls_emu) {
if (has_tls_reg && !defer_tls_reg_update) {
asm("mcr p15, 0, %0, c13, c0, 3"
: : "r" (val));
} else {
} else if (!has_tls_reg) {
#ifdef CONFIG_KUSER_HELPERS
/*
* User space must never try to access this

@@ -13,6 +13,7 @@
#define V7M_SCB_ICSR_PENDSVSET (1 << 28)
#define V7M_SCB_ICSR_PENDSVCLR (1 << 27)
#define V7M_SCB_ICSR_RETTOBASE (1 << 11)
#define V7M_SCB_ICSR_VECTACTIVE 0x000001ff

#define V7M_SCB_VTOR 0x08

@@ -38,7 +39,7 @@
#define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16)

#define V7M_xPSR_FRAMEPTRALIGN 0x00000200
#define V7M_xPSR_EXCEPTIONNO 0x000001ff
#define V7M_xPSR_EXCEPTIONNO V7M_SCB_ICSR_VECTACTIVE

/*
* When branching to an address that has bits [31:28] == 0xf an exception return

@@ -10,6 +10,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_insn.o = -pg
CFLAGS_REMOVE_patch.o = -pg
CFLAGS_REMOVE_unwind.o = -pg
endif

CFLAGS_REMOVE_return_address.o = -pg

@@ -43,9 +43,6 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
#ifndef CONFIG_THREAD_INFO_IN_TASK
DEFINE(TI_TASK, offsetof(struct thread_info, task));
#endif
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));

@@ -19,9 +19,6 @@
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
@@ -30,19 +27,35 @@
#include <asm/uaccess-asm.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>

/*
* Interrupt handling.
*/
.macro irq_handler
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
mov r0, sp
bl generic_handle_arch_irq
#else
arch_irq_handler_default
.macro irq_handler, from_user:req
mov r1, sp
ldr_this_cpu r2, irq_stack_ptr, r2, r3
.if \from_user == 0
@
@ If we took the interrupt while running in the kernel, we may already
@ be using the IRQ stack, so revert to the original value in that case.
@
subs r3, r2, r1 @ SP above bottom of IRQ stack?
rsbscs r3, r3, #THREAD_SIZE @ ... and below the top?
#ifdef CONFIG_VMAP_STACK
ldr_va r3, high_memory, cc @ End of the linear region
cmpcc r3, r1 @ Stack pointer was below it?
#endif
bcc 0f @ If not, switch to the IRQ stack
mov r0, r1
bl generic_handle_arch_irq
b 1f
0:
.endif

mov_l r0, generic_handle_arch_irq
bl call_with_stack
1:
.endm

.macro pabt_helper
@@ -140,27 +153,35 @@ ENDPROC(__und_invalid)
#define SPFIX(code...)
#endif

.macro svc_entry, stack_hole=0, trace=1, uaccess=1
.macro svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
sub sp, sp, #(SVC_REGS_SIZE + \stack_hole)
THUMB( add sp, r1 ) @ get SP in a GPR without
THUMB( sub r1, sp, r1 ) @ using a temp register

.if \overflow_check
UNWIND(.save {r0 - pc} )
do_overflow_check (SVC_REGS_SIZE + \stack_hole)
.endif

#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp )
SPFIX( tst r0, #4 ) @ test original stack alignment
SPFIX( ldr r0, [sp] ) @ restored
tst r1, #4 @ test stack pointer alignment
sub r1, sp, r1 @ restore original R1
sub sp, r1 @ restore original SP
#else
SPFIX( tst sp, #4 )
#endif
SPFIX( subeq sp, sp, #4 )
stmia sp, {r1 - r12}
SPFIX( subne sp, sp, #4 )

ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} ) @ No STMIB in Thumb-2

ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
add r7, sp, #S_SP @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
add r2, sp, #(SVC_REGS_SIZE + \stack_hole)
SPFIX( addne r2, r2, #4 )
str r3, [sp] @ save the "real" r0 copied
@ from the exception stack

mov r3, lr
@@ -199,7 +220,7 @@ ENDPROC(__dabt_svc)
.align 5
__irq_svc:
svc_entry
irq_handler
irq_handler from_user=0

#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -426,7 +447,7 @@ ENDPROC(__dabt_usr)
__irq_usr:
usr_entry
kuser_cmpxchg_check
irq_handler
irq_handler from_user=1
get_thread_info tsk
mov why, #0
b ret_to_user_from_irq
@@ -752,16 +773,17 @@ ENTRY(__switch_to)
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
ldr r7, [r2, #TI_TASK]
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
!defined(CONFIG_STACKPROTECTOR_PER_TASK)
ldr r8, =__stack_chk_guard
.if (TSK_STACK_CANARY > IMM12_MASK)
add r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
add r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
ldr r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
.else
ldr r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
.endif
ldr r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO)
mov r7, r2 @ Preserve 'next'
#endif
mov r7, r2 @ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
@@ -770,19 +792,102 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
str r7, [r8]
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
!defined(CONFIG_STACKPROTECTOR_PER_TASK)
str r9, [r8]
#endif
THUMB( mov ip, r4 )
mov r0, r5
set_current r7
ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
THUMB( ldr sp, [ip], #4 )
THUMB( ldr pc, [ip] )
#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
set_current r7, r8
ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
#else
mov r1, r7
ldmia r4, {r4 - sl, fp, ip, lr} @ Load all regs saved previously
#ifdef CONFIG_VMAP_STACK
@
@ Do a dummy read from the new stack while running from the old one so
@ that we can rely on do_translation_fault() to fix up any stale PMD
@ entries covering the vmalloc region.
@
ldr r2, [ip]
#endif

@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
@ effectuates the task switch, as that is what causes the observable
@ values of current and current_thread_info to change. When
@ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
@ current_thread_info) is done explicitly, and the update of SP just
@ switches us to another stack, with few other side effects. In order
@ to prevent this distinction from causing any inconsistencies, let's
@ keep the 'set_current' call as close as we can to the update of SP.
set_current r1, r2
mov sp, ip
ret lr
#endif
UNWIND(.fnend )
ENDPROC(__switch_to)

#ifdef CONFIG_VMAP_STACK
.text
.align 2
__bad_stack:
@
@ We've just detected an overflow. We need to load the address of this
@ CPU's overflow stack into the stack pointer register. We have only one
@ scratch register so let's use a sequence of ADDs including one
@ involving the PC, and decorate them with PC-relative group
@ relocations. As these are ARM only, switch to ARM mode first.
@
@ We enter here with IP clobbered and its value stashed on the mode
@ stack.
@
THUMB( bx pc )
THUMB( nop )
THUMB( .arm )
ldr_this_cpu_armv6 ip, overflow_stack_ptr

str sp, [ip, #-4]! @ Preserve original SP value
mov sp, ip @ Switch to overflow stack
pop {ip} @ Original SP in IP

#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
mov ip, ip @ mov expected by unwinder
push {fp, ip, lr, pc} @ GCC flavor frame record
#else
str ip, [sp, #-8]! @ store original SP
push {fpreg, lr} @ Clang flavor frame record
#endif
UNWIND( ldr ip, [r0, #4] ) @ load exception LR
UNWIND( str ip, [sp, #12] ) @ store in the frame record
ldr ip, [r0, #12] @ reload IP

@ Store the original GPRs to the new stack.
svc_entry uaccess=0, overflow_check=0

UNWIND( .save {sp, pc} )
UNWIND( .save {fpreg, lr} )
UNWIND( .setfp fpreg, sp )

ldr fpreg, [sp, #S_SP] @ Add our frame record
@ to the linked list
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
ldr r1, [fp, #4] @ reload SP at entry
add fp, fp, #12
#else
ldr r1, [fpreg, #8]
#endif
str r1, [sp, #S_SP] @ store in pt_regs

@ Stash the regs for handle_bad_stack
mov r0, sp

@ Time to die
bl handle_bad_stack
nop
UNWIND( .fnend )
ENDPROC(__bad_stack)
#endif

__INIT

/*

@@ -16,12 +16,14 @@

.equ NR_syscalls, __NR_syscalls

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro arch_ret_to_user, tmp
#ifdef CONFIG_ARCH_IOP32X
mrc p15, 0, \tmp, c15, c1, 0
tst \tmp, #(1 << 6)
bicne \tmp, \tmp, #(1 << 6)
mcrne p15, 0, \tmp, c15, c1, 0 @ Disable cp6 access
#endif
.endm

#include "entry-header.S"

@@ -55,7 +57,7 @@ __ret_fast_syscall:

/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
arch_ret_to_user r1

restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
@@ -128,7 +130,7 @@ no_work_pending:
asm_trace_hardirqs_on save = 0

/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
arch_ret_to_user r1
ct_user_enter save = 0

restore_user_regs fast = 0, offset = 0

@@ -22,12 +22,9 @@
* mcount can be thought of as a function called in the middle of a subroutine
* call. As such, it needs to be transparent for both the caller and the
* callee: the original lr needs to be restored when leaving mcount, and no
* registers should be clobbered. (In the __gnu_mcount_nc implementation, we
* clobber the ip register. This is OK because the ARM calling convention
* allows it to be clobbered in subroutines and doesn't use it to hold
* parameters.)
* registers should be clobbered.
*
* When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
* When using dynamic ftrace, we patch out the mcount call by a "add sp, #4"
* instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
*/

@@ -38,23 +35,20 @@

.macro __mcount suffix
mcount_enter
ldr r0, =ftrace_trace_function
ldr r2, [r0]
adr r0, .Lftrace_stub
ldr_va r2, ftrace_trace_function
badr r0, .Lftrace_stub
cmp r0, r2
bne 1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ldr r1, =ftrace_graph_return
ldr r2, [r1]
cmp r0, r2
bne ftrace_graph_caller\suffix
ldr_va r2, ftrace_graph_return
cmp r0, r2
bne ftrace_graph_caller\suffix

ldr r1, =ftrace_graph_entry
ldr r2, [r1]
ldr r0, =ftrace_graph_entry_stub
cmp r0, r2
bne ftrace_graph_caller\suffix
ldr_va r2, ftrace_graph_entry
mov_l r0, ftrace_graph_entry_stub
cmp r0, r2
bne ftrace_graph_caller\suffix
#endif

mcount_exit
@@ -70,29 +64,27 @@

.macro __ftrace_regs_caller

sub sp, sp, #8 @ space for PC and CPSR OLD_R0,
str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0,
@ OLD_R0 will overwrite previous LR

add ip, sp, #12 @ move in IP the value of SP as it was
@ before the push {lr} of the mcount mechanism

str lr, [sp, #0] @ store LR instead of PC

ldr lr, [sp, #8] @ get previous LR
ldr lr, [sp, #8] @ get previous LR

str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR

stmdb sp!, {ip, lr}
stmdb sp!, {r0-r11, lr}
str lr, [sp, #-4]! @ store previous LR as LR

add lr, sp, #16 @ move in LR the value of SP as it was
@ before the push {lr} of the mcount mechanism

push {r0-r11, ip, lr}

@ stack content at this point:
@ 0 4 48 52 56 60 64 68 72
@ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
@ R0 | R1 | ... | IP | SP + 4 | previous LR | LR | PSR | OLD_R0 |

mov r3, sp @ struct pt_regs*
mov r3, sp @ struct pt_regs*

ldr r2, =function_trace_op
ldr r2, [r2] @ pointer to the current
ldr_va r2, function_trace_op @ pointer to the current
@ function tracing op

ldr r1, [sp, #S_LR] @ lr of instrumented func
@@ -108,35 +100,37 @@ ftrace_regs_call:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_regs_call
ftrace_graph_regs_call:
mov r0, r0
ARM( mov r0, r0 )
THUMB( nop.w )
#endif

@ pop saved regs
ldmia sp!, {r0-r12} @ restore r0 through r12
ldr ip, [sp, #8] @ restore PC
ldr lr, [sp, #4] @ restore LR
ldr sp, [sp, #0] @ restore SP
mov pc, ip @ return
pop {r0-r11, ip, lr} @ restore r0 through r12
ldr lr, [sp], #4 @ restore LR
ldr pc, [sp], #12
.endm

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.macro __ftrace_graph_regs_caller

sub r0, fp, #4 @ lr of instrumented routine (parent)
#ifdef CONFIG_UNWINDER_FRAME_POINTER
sub r0, fp, #4 @ lr of instrumented routine (parent)
#else
add r0, sp, #S_LR
#endif

@ called from __ftrace_regs_caller
ldr r1, [sp, #S_PC] @ instrumented routine (func)
ldr r1, [sp, #S_PC] @ instrumented routine (func)
mcount_adjust_addr r1, r1

mov r2, fp @ frame pointer
mov r2, fpreg @ frame pointer
add r3, sp, #PT_REGS_SIZE
bl prepare_ftrace_return

@ pop registers saved in ftrace_regs_caller
ldmia sp!, {r0-r12} @ restore r0 through r12
ldr ip, [sp, #8] @ restore PC
ldr lr, [sp, #4] @ restore LR
ldr sp, [sp, #0] @ restore SP
mov pc, ip @ return
pop {r0-r11, ip, lr} @ restore r0 through r12
|
||||
ldr lr, [sp], #4 @ restore LR
|
||||
ldr pc, [sp], #12
|
||||
|
||||
.endm
|
||||
#endif
|
||||
@ -149,8 +143,7 @@ ftrace_graph_regs_call:
|
||||
mcount_adjust_addr r0, lr @ instrumented function
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
|
||||
ldr r2, =function_trace_op
|
||||
ldr r2, [r2] @ pointer to the current
|
||||
ldr_va r2, function_trace_op @ pointer to the current
|
||||
@ function tracing op
|
||||
mov r3, #0 @ regs is NULL
|
||||
#endif
|
||||
@ -162,14 +155,19 @@ ftrace_call\suffix:
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
.globl ftrace_graph_call\suffix
|
||||
ftrace_graph_call\suffix:
|
||||
mov r0, r0
|
||||
ARM( mov r0, r0 )
|
||||
THUMB( nop.w )
|
||||
#endif
|
||||
|
||||
mcount_exit
|
||||
.endm
|
||||
|
||||
.macro __ftrace_graph_caller
|
||||
#ifdef CONFIG_UNWINDER_FRAME_POINTER
|
||||
sub r0, fp, #4 @ &lr of instrumented routine (&parent)
|
||||
#else
|
||||
add r0, sp, #20
|
||||
#endif
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
@ called from __ftrace_caller, saved in mcount_enter
|
||||
ldr r1, [sp, #16] @ instrumented routine (func)
|
||||
@ -178,7 +176,8 @@ ftrace_graph_call\suffix:
|
||||
@ called from __mcount, untouched in lr
|
||||
mcount_adjust_addr r1, lr @ instrumented routine (func)
|
||||
#endif
|
||||
mov r2, fp @ frame pointer
|
||||
mov r2, fpreg @ frame pointer
|
||||
add r3, sp, #24
|
||||
bl prepare_ftrace_return
|
||||
mcount_exit
|
||||
.endm
|
||||
@ -202,16 +201,17 @@ ftrace_graph_call\suffix:
|
||||
.endm
|
||||
|
||||
.macro mcount_exit
|
||||
ldmia sp!, {r0-r3, ip, lr}
|
||||
ret ip
|
||||
ldmia sp!, {r0-r3}
|
||||
ldr lr, [sp, #4]
|
||||
ldr pc, [sp], #8
|
||||
.endm
|
||||
|
||||
ENTRY(__gnu_mcount_nc)
|
||||
UNWIND(.fnstart)
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
mov ip, lr
|
||||
ldmia sp!, {lr}
|
||||
ret ip
|
||||
push {lr}
|
||||
ldr lr, [sp, #4]
|
||||
ldr pc, [sp], #8
|
||||
#else
|
||||
__mcount
|
||||
#endif
|
||||
@ -256,17 +256,33 @@ ENDPROC(ftrace_graph_regs_caller)
|
||||
.purgem mcount_exit
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
.globl return_to_handler
|
||||
return_to_handler:
|
||||
ENTRY(return_to_handler)
|
||||
stmdb sp!, {r0-r3}
|
||||
mov r0, fp @ frame pointer
|
||||
add r0, sp, #16 @ sp at exit of instrumented routine
|
||||
bl ftrace_return_to_handler
|
||||
mov lr, r0 @ r0 has real ret addr
|
||||
ldmia sp!, {r0-r3}
|
||||
ret lr
|
||||
ENDPROC(return_to_handler)
|
||||
#endif
|
||||
|
||||
ENTRY(ftrace_stub)
|
||||
.Lftrace_stub:
|
||||
ret lr
|
||||
ENDPROC(ftrace_stub)
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
|
||||
__INIT
|
||||
|
||||
.macro init_tramp, dst:req
|
||||
ENTRY(\dst\()_from_init)
|
||||
ldr pc, =\dst
|
||||
ENDPROC(\dst\()_from_init)
|
||||
.endm
|
||||
|
||||
init_tramp ftrace_caller
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
|
||||
init_tramp ftrace_regs_caller
|
||||
#endif
|
||||
#endif
|
||||
|
@ -292,12 +292,18 @@
|
||||
|
||||
|
||||
.macro restore_user_regs, fast = 0, offset = 0
|
||||
#if defined(CONFIG_CPU_32v6K) && !defined(CONFIG_CPU_V6)
|
||||
#if defined(CONFIG_CPU_32v6K) && \
|
||||
(!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
|
||||
#ifdef CONFIG_CPU_V6
|
||||
ALT_SMP(nop)
|
||||
ALT_UP_B(.L1_\@)
|
||||
#endif
|
||||
@ The TLS register update is deferred until return to user space so we
|
||||
@ can use it for other things while running in the kernel
|
||||
get_thread_info r1
|
||||
mrc p15, 0, r1, c13, c0, 3 @ get current_thread_info pointer
|
||||
ldr r1, [r1, #TI_TP_VALUE]
|
||||
mcr p15, 0, r1, c13, c0, 3 @ set TLS register
|
||||
.L1_\@:
|
||||
#endif
|
||||
|
||||
uaccess_enable r1, isb=0
|
||||
@ -423,3 +429,40 @@ scno .req r7 @ syscall number
|
||||
tbl .req r8 @ syscall table pointer
|
||||
why .req r8 @ Linux syscall (!= 0)
|
||||
tsk .req r9 @ current thread_info
|
||||
|
||||
.macro do_overflow_check, frame_size:req
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
@
|
||||
@ Test whether the SP has overflowed. Task and IRQ stacks are aligned
|
||||
@ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
|
||||
@ zero.
|
||||
@
|
||||
ARM( tst sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT) )
|
||||
THUMB( tst r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT) )
|
||||
THUMB( it ne )
|
||||
bne .Lstack_overflow_check\@
|
||||
|
||||
.pushsection .text
|
||||
.Lstack_overflow_check\@:
|
||||
@
|
||||
@ The stack pointer is not pointing to a valid vmap'ed stack, but it
|
||||
@ may be pointing into the linear map instead, which may happen if we
|
||||
@ are already running from the overflow stack. We cannot detect overflow
|
||||
@ in such cases so just carry on.
|
||||
@
|
||||
str ip, [r0, #12] @ Stash IP on the mode stack
|
||||
ldr_va ip, high_memory @ Start of VMALLOC space
|
||||
ARM( cmp sp, ip ) @ SP in vmalloc space?
|
||||
THUMB( cmp r1, ip )
|
||||
THUMB( itt lo )
|
||||
ldrlo ip, [r0, #12] @ Restore IP
|
||||
blo .Lout\@ @ Carry on
|
||||
|
||||
THUMB( sub r1, sp, r1 ) @ Restore original R1
|
||||
THUMB( sub sp, r1 ) @ Restore original SP
|
||||
add sp, sp, #\frame_size @ Undo svc_entry's SP change
|
||||
b __bad_stack @ Handle VMAP stack overflow
|
||||
.popsection
|
||||
.Lout\@:
|
||||
#endif
|
||||
.endm
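
A minimal sketch of the invariant the single TST above relies on, using assumed values (THREAD_SIZE_ORDER = 1, PAGE_SHIFT = 12, i.e. 8 KiB stacks aligned to twice their size); the base address is made up for illustration:

#include <assert.h>

#define PAGE_SHIFT        12
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE       (1UL << (THREAD_SIZE_ORDER + PAGE_SHIFT))
#define OVERFLOW_BIT      (1UL << (THREAD_SIZE_ORDER + PAGE_SHIFT))

int main(void)
{
	unsigned long base = 0xf0804000UL; /* assumed: aligned to 2 * THREAD_SIZE */
	unsigned long sp;

	/* any SP inside the stack keeps the tested bit clear */
	for (sp = base; sp < base + THREAD_SIZE; sp += 4)
		assert(!(sp & OVERFLOW_BIT));

	/* the first push below the base (an overflow) sets it */
	assert((base - 4) & OVERFLOW_BIT);
	return 0;
}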

@@ -39,16 +39,25 @@ __irq_entry:
@
@ Invoke the IRQ handler
@
mrs r0, ipsr
ldr r1, =V7M_xPSR_EXCEPTIONNO
and r0, r1
sub r0, #16
mov r1, sp
stmdb sp!, {lr}
@ routine called with r0 = irq number, r1 = struct pt_regs *
bl nvic_handle_irq
mov r0, sp
ldr_this_cpu sp, irq_stack_ptr, r1, r2

@
@ If we took the interrupt while running in the kernel, we may already
@ be using the IRQ stack, so revert to the original value in that case.
@
subs r2, sp, r0 @ SP above bottom of IRQ stack?
rsbscs r2, r2, #THREAD_SIZE @ ... and below the top?
movcs sp, r0

push {r0, lr} @ preserve LR and original SP

@ routine called with r0 = struct pt_regs *
bl generic_handle_arch_irq

pop {r0, lr}
mov sp, r0

pop {lr}
@
@ Check for any pending work if returning to user
@
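
For reference, the IRQ number computation at the top of this hunk follows the v7-M convention that external interrupts begin at exception #16; a small sketch, assuming a 9-bit exception field for V7M_xPSR_EXCEPTIONNO (the actual mask value is not quoted in this hunk):

#include <assert.h>

#define V7M_xPSR_EXCEPTIONNO 0x1ff /* assumed: 9-bit IPSR exception number */

static int ipsr_to_irq(unsigned int ipsr)
{
	return (int)(ipsr & V7M_xPSR_EXCEPTIONNO) - 16;
}

int main(void)
{
	assert(ipsr_to_irq(16) == 0);  /* first NVIC interrupt */
	assert(ipsr_to_irq(15) == -1); /* SysTick: not an external IRQ */
	return 0;
}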
@@ -101,15 +110,17 @@ ENTRY(__switch_to)
str sp, [ip], #4
str lr, [ip], #4
mov r5, r0
mov r6, r2 @ Preserve 'next'
add r4, r2, #TI_CPU_SAVE
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
mov ip, r4
mov r0, r5
ldmia ip!, {r4 - r11} @ Load all regs saved previously
ldr sp, [ip]
ldr pc, [ip, #4]!
mov r1, r6
ldmia r4, {r4 - r12, lr} @ Load all regs saved previously
set_current r1, r2
mov sp, ip
bx lr
.fnend
ENDPROC(__switch_to)

@@ -22,12 +22,24 @@
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/patch.h>

/*
* The compiler emitted profiling hook consists of
*
* PUSH {LR}
* BL __gnu_mcount_nc
*
* To turn this combined sequence into a NOP, we need to restore the value of
* SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR is not
* modified anyway, and reloading LR from memory is highly likely to be less
* efficient.
*/
#ifdef CONFIG_THUMB2_KERNEL
#define NOP 0xf85deb04 /* pop.w {lr} */
#define NOP 0xf10d0d04 /* add.w sp, sp, #4 */
#else
#define NOP 0xe8bd4000 /* pop {lr} */
#define NOP 0xe28dd004 /* add sp, sp, #4 */
#endif
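
A quick, hedged check (not part of the patch) that the ARM encoding chosen above really is "add sp, sp, #4", decoding the data-processing-immediate fields of 0xe28dd004:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t insn = 0xe28dd004;

	assert(((insn >> 28) & 0xf) == 0xe);   /* condition: always */
	assert(((insn >> 21) & 0x7f) == 0x14); /* ADD, immediate form */
	assert(((insn >> 16) & 0xf) == 13);    /* Rn = SP */
	assert(((insn >> 12) & 0xf) == 13);    /* Rd = SP */
	assert((insn & 0xfff) == 4);           /* imm12 = #4 */
	return 0;
}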

#ifdef CONFIG_DYNAMIC_FTRACE
@@ -51,9 +63,20 @@ static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
void ftrace_caller_from_init(void);
void ftrace_regs_caller_from_init(void);

static unsigned long __ref adjust_address(struct dyn_ftrace *rec,
unsigned long addr)
{
return addr;
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE) ||
system_state >= SYSTEM_FREEING_INITMEM ||
likely(!is_kernel_inittext(rec->ip)))
return addr;
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) ||
addr == (unsigned long)&ftrace_caller)
return (unsigned long)&ftrace_caller_from_init;
return (unsigned long)&ftrace_regs_caller_from_init;
}

int ftrace_arch_code_modify_prepare(void)
@@ -189,15 +212,23 @@ int ftrace_make_nop(struct module *mod,
#endif

new = ftrace_nop_replace(rec);
ret = ftrace_modify_code(ip, old, new, true);
/*
* Locations in .init.text may call __gnu_mcount_nc via a linker
* emitted veneer if they are too far away from its implementation, and
* so validation may fail spuriously in such cases. Let's work around
* this by omitting those from validation.
*/
ret = ftrace_modify_code(ip, old, new, !is_kernel_inittext(ip));

return ret;
}
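
The veneer situation described in the comment arises because a direct BL only reaches +/- 32 MiB; a sketch of that range check with made-up addresses (ARM encoding assumed, where the PC reads as the instruction address plus 8):

#include <assert.h>

static int bl_in_range(unsigned long pc, unsigned long target)
{
	long delta = (long)(target - (pc + 8));

	/* BL carries a signed 24-bit word offset: +/- 32 MiB, word aligned */
	return delta >= -(1L << 25) && delta < (1L << 25) && !(delta & 3);
}

int main(void)
{
	assert(bl_in_range(0xc0008000UL, 0xc1000000UL));  /* ~16 MiB: direct */
	assert(!bl_in_range(0xc0008000UL, 0xc4000000UL)); /* ~64 MiB: veneer */
	return 0;
}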
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
unsigned long frame_pointer,
unsigned long stack_pointer)
{
unsigned long return_hooker = (unsigned long) &return_to_handler;
unsigned long old;
@@ -205,6 +236,23 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;

if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
/* FP points one word below parent's top of stack */
frame_pointer += 4;
} else {
struct stackframe frame = {
.fp = frame_pointer,
.sp = stack_pointer,
.lr = self_addr,
.pc = self_addr,
};
if (unwind_frame(&frame) < 0)
return;
if (frame.lr != self_addr)
parent = frame.lr_addr;
frame_pointer = frame.sp;
}

old = *parent;
*parent = return_hooker;

@@ -225,7 +273,7 @@ static int __ftrace_modify_caller(unsigned long *callsite,
unsigned long caller_fn = (unsigned long) func;
unsigned long pc = (unsigned long) callsite;
unsigned long branch = arm_gen_branch(pc, caller_fn);
unsigned long nop = 0xe1a00000; /* mov r0, r0 */
unsigned long nop = arm_gen_nop();
unsigned long old = enable ? nop : branch;
unsigned long new = enable ? branch : nop;

@@ -105,10 +105,8 @@ __mmap_switched:
mov r1, #0
bl __memset @ clear .bss

#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
adr_l r0, init_task @ get swapper task_struct
set_current r0
#endif
set_current r0, r1

ldmia r4, {r0, r1, r2, r3}
str r9, [r0] @ Save processor ID

@@ -424,6 +424,13 @@ ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

ENTRY(__secondary_switched)
#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
@ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
@ as the ID map does not cover the vmalloc region.
mrc p15, 0, ip, c2, c0, 1 @ read TTBR1
mcr p15, 0, ip, c2, c0, 0 @ set TTBR0
instr_sync
#endif
adr_l r7, secondary_data + 12 @ get secondary_data.stack
ldr sp, [r7]
ldr r0, [r7, #4] @ get secondary_data.task

@@ -36,13 +36,53 @@
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>
#include <asm/softirq_stack.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "reboot.h"

unsigned long irq_err_count;

#ifdef CONFIG_IRQSTACKS

asmlinkage DEFINE_PER_CPU_READ_MOSTLY(u8 *, irq_stack_ptr);

static void __init init_irq_stacks(void)
{
u8 *stack;
int cpu;

for_each_possible_cpu(cpu) {
if (!IS_ENABLED(CONFIG_VMAP_STACK))
stack = (u8 *)__get_free_pages(GFP_KERNEL,
THREAD_SIZE_ORDER);
else
stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN,
THREADINFO_GFP, NUMA_NO_NODE,
__builtin_return_address(0));

if (WARN_ON(!stack))
break;
per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE];
}
}

static void ____do_softirq(void *arg)
{
__do_softirq();
}

void do_softirq_own_stack(void)
{
call_with_stack(____do_softirq, NULL,
__this_cpu_read(irq_stack_ptr));
}
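
A userspace stand-in (assumed semantics, not the real implementation) showing the call_with_stack() contract used by do_softirq_own_stack() above: fn(arg) runs to completion with SP inside the stack whose top is passed in, then control resumes on the original stack:

#include <stdio.h>

static void call_with_stack(void (*fn)(void *), void *arg, void *sp)
{
	/* the real asm switches SP to 'sp' before calling fn; this
	 * stand-in only models the externally visible behaviour */
	(void)sp;
	fn(arg);
}

static void work(void *arg)
{
	printf("running %s on the new stack\n", (const char *)arg);
}

int main(void)
{
	static unsigned char stack[8192];

	/* per the asm, the sp argument is the *top* of the new stack */
	call_with_stack(work, "softirq work", stack + sizeof(stack));
	return 0;
}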

#endif

int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
@@ -80,27 +120,14 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs)
ack_bad_irq(irq);
}

/*
* asm_do_IRQ is the interface to be used from assembly code.
*/
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs;

irq_enter();
old_regs = set_irq_regs(regs);

handle_IRQ(irq, regs);

set_irq_regs(old_regs);
irq_exit();
}

void __init init_IRQ(void)
{
int ret;

#ifdef CONFIG_IRQSTACKS
init_irq_stacks();
#endif

if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
irqchip_init();
else

@@ -68,6 +68,44 @@ bool module_exit_section(const char *name)
strstarts(name, ".ARM.exidx.exit");
}

#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
/*
* This implements the partitioning algorithm for group relocations as
* documented in the ARM AArch32 ELF psABI (IHI 0044).
*
* A single PC-relative symbol reference is divided in up to 3 add or subtract
* operations, where the final one could be incorporated into a load/store
* instruction with immediate offset. E.g.,
*
* ADD Rd, PC, #... or ADD Rd, PC, #...
* ADD Rd, Rd, #... ADD Rd, Rd, #...
* LDR Rd, [Rd, #...] ADD Rd, Rd, #...
*
* The latter has a guaranteed range of only 16 MiB (3x8 == 24 bits), so it is
* of limited use in the kernel. However, the ADD/ADD/LDR combo has a range of
* -/+ 256 MiB, (2x8 + 12 == 28 bits), which means it has sufficient range for
* any in-kernel symbol reference (unless module PLTs are being used).
*
* The main advantage of this approach over the typical pattern using a literal
* load is that literal loads may miss in the D-cache, and generally lead to
* lower cache efficiency for variables that are referenced often from many
* different places in the code.
*/
static u32 get_group_rem(u32 group, u32 *offset)
{
u32 val = *offset;
u32 shift;
do {
shift = val ? (31 - __fls(val)) & ~1 : 32;
*offset = val;
if (!val)
break;
val &= 0xffffff >> shift;
} while (group--);
return shift;
}
#endif
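
A worked, userspace rendition of the partitioning above; the helper mirrors get_group_rem() (note that (31 - __fls(val)) equals __builtin_clz(val) for a 32-bit value), and the 28-bit offset is an arbitrary example:

#include <assert.h>
#include <stdint.h>

static uint32_t get_group_rem(uint32_t group, uint32_t *offset)
{
	uint32_t val = *offset;
	uint32_t shift;

	do {
		shift = val ? __builtin_clz(val) & ~1 : 32;
		*offset = val;
		if (!val)
			break;
		val &= 0xffffff >> shift;
	} while (group--);
	return shift;
}

int main(void)
{
	uint32_t offset = 0x89abcde; /* assumed 28-bit displacement */
	uint32_t rem, s, g0, g1;

	rem = offset;
	s = get_group_rem(0, &rem);        /* group 0: top 8-bit chunk  */
	g0 = rem & ~(0xffffff >> s);
	rem = offset;
	s = get_group_rem(1, &rem);        /* group 1: next 8-bit chunk */
	g1 = rem & ~(0xffffff >> s);
	rem = offset;
	get_group_rem(2, &rem);            /* group 2: final remainder  */

	assert(g0 + g1 + rem == offset);   /* chunks recompose the offset */
	assert(rem <= 0xfff);              /* fits the LDR imm12 field    */
	return 0;
}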

int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned int relindex, struct module *module)
@@ -82,6 +120,9 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned long loc;
Elf32_Sym *sym;
const char *symname;
#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
u32 shift, group = 1;
#endif
s32 offset;
u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
@@ -212,6 +253,55 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
*(u32 *)loc = __opcode_to_mem_arm(tmp);
break;

#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
case R_ARM_ALU_PC_G0_NC:
group = 0;
fallthrough;
case R_ARM_ALU_PC_G1_NC:
tmp = __mem_to_opcode_arm(*(u32 *)loc);
offset = ror32(tmp & 0xff, (tmp & 0xf00) >> 7);
if (tmp & BIT(22))
offset = -offset;
offset += sym->st_value - loc;
if (offset < 0) {
offset = -offset;
tmp = (tmp & ~BIT(23)) | BIT(22); // SUB opcode
} else {
tmp = (tmp & ~BIT(22)) | BIT(23); // ADD opcode
}

shift = get_group_rem(group, &offset);
if (shift < 24) {
offset >>= 24 - shift;
offset |= (shift + 8) << 7;
}
*(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
break;

case R_ARM_LDR_PC_G2:
tmp = __mem_to_opcode_arm(*(u32 *)loc);
offset = tmp & 0xfff;
if (~tmp & BIT(23)) // U bit cleared?
offset = -offset;
offset += sym->st_value - loc;
if (offset < 0) {
offset = -offset;
tmp &= ~BIT(23); // clear U bit
} else {
tmp |= BIT(23); // set U bit
}
get_group_rem(2, &offset);

if (offset > 0xfff) {
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
module->name, relindex, i, symname,
ELF32_R_TYPE(rel->r_info), loc,
sym->st_value);
return -ENOEXEC;
}
*(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
break;
#endif
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:

@@ -36,7 +36,7 @@

#include "signal.h"

#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
DEFINE_PER_CPU(struct task_struct *, __entry_task);
#endif

@@ -46,6 +46,11 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

#ifndef CONFIG_CURRENT_POINTER_IN_TPIDRURO
asmlinkage struct task_struct *__current;
EXPORT_SYMBOL(__current);
#endif

static const char *processor_modes[] __maybe_unused = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",

@@ -41,7 +41,8 @@ void *return_address(unsigned int level)
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)return_address;
here:
frame.pc = (unsigned long)&&here;
#ifdef CONFIG_KRETPROBES
frame.kr_cur = NULL;
frame.tsk = current;
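
The here:/&&here pattern introduced in this hunk uses the GCC labels-as-values extension, so that frame.pc is an address inside the function body (past the prologue) rather than the function's entry point; a minimal illustration:

#include <stdio.h>

static void *current_pc(void)
{
here:
	/* GCC/Clang extension: taking the address of a local label
	 * yields a PC within this function */
	return &&here;
}

int main(void)
{
	printf("pc inside current_pc(): %p\n", current_pc());
	return 0;
}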

@@ -141,10 +141,10 @@ EXPORT_SYMBOL(outer_cache);
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
u32 irq[3];
u32 abt[3];
u32 und[3];
u32 fiq[3];
u32 irq[4];
u32 abt[4];
u32 und[4];
u32 fiq[4];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M

@@ -67,6 +67,12 @@ ENTRY(__cpu_suspend)
ldr r4, =cpu_suspend_size
#endif
mov r5, sp @ current virtual SP
#ifdef CONFIG_VMAP_STACK
@ Run the suspend code from the overflow stack so we don't have to rely
@ on vmalloc-to-phys conversions anywhere in the arch suspend code.
@ The original SP value captured in R5 will be restored on the way out.
ldr_this_cpu sp, overflow_stack_ptr, r6, r7
#endif
add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn
sub sp, sp, r4 @ allocate CPU state on stack
ldr r3, =sleep_save_sp
@@ -113,6 +119,13 @@ ENTRY(cpu_resume_mmu)
ENDPROC(cpu_resume_mmu)
.popsection
cpu_resume_after_mmu:
#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
@ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
@ as the ID map does not cover the vmalloc region.
mrc p15, 0, ip, c2, c0, 1 @ read TTBR1
mcr p15, 0, ip, c2, c0, 0 @ set TTBR0
instr_sync
#endif
bl cpu_init @ restore the und/abt/irq banked regs
mov r0, #0 @ return zero on success
ldmfd sp!, {r4 - r11, pc}

@@ -400,6 +400,12 @@ static void smp_store_cpu_info(unsigned int cpuid)
check_cpu_icache_size(cpuid);
}

static void set_current(struct task_struct *cur)
{
/* Set TPIDRURO */
asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
}

/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
@@ -628,11 +634,6 @@ static void ipi_complete(unsigned int cpu)
/*
* Main handler for inter-processor interrupts
*/
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
handle_IPI(ipinr, regs);
}

static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();

@@ -160,7 +160,8 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)__save_stack_trace;
here:
frame.pc = (unsigned long)&&here;
}
#ifdef CONFIG_KRETPROBES
frame.kr_cur = NULL;

@@ -36,6 +36,7 @@
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>

@@ -61,13 +62,24 @@ static int __init user_debug_setup(char *str)
__setup("user_debug=", user_debug_setup);
#endif

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

void dump_backtrace_entry(unsigned long where, unsigned long from,
unsigned long frame, const char *loglvl)
{
unsigned long end = frame + 4 + sizeof(struct pt_regs);

if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
IS_ENABLED(CONFIG_CC_IS_GCC) &&
end > ALIGN(frame, THREAD_SIZE)) {
/*
* If we are walking past the end of the stack, it may be due
* to the fact that we are on an IRQ or overflow stack. In this
* case, we can load the address of the other stack from the
* frame record.
*/
frame = ((unsigned long *)frame)[-2] - 4;
end = frame + 4 + sizeof(struct pt_regs);
}
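
The [-2] index above assumes the GCC/APCS frame record created by "mov ip, sp; push {fp, ip, lr, pc}; sub fp, ip, #4", i.e. fp[0] = saved pc, fp[-1] = lr, fp[-2] = sp at entry (on the other stack after a stack switch), fp[-3] = caller's fp; a toy model of that lookup with placeholder values:

#include <assert.h>

int main(void)
{
	/* in push order (ascending addresses): fp, ip(=old sp), lr, pc */
	unsigned long record[4] = { 0x1111 /* fp */, 0xc0ffee00 /* sp */,
				    0x2222 /* lr */, 0x3333 /* pc */ };
	unsigned long *fp = &record[3]; /* fp points at the saved pc slot */

	assert(fp[-2] == 0xc0ffee00);   /* recovers the SP at entry */
	return 0;
}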

#ifndef CONFIG_KALLSYMS
printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
loglvl, where, from);
@@ -111,7 +123,8 @@ void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
static int verify_stack(unsigned long sp)
{
if (sp < PAGE_OFFSET ||
(sp > (unsigned long)high_memory && high_memory != NULL))
(!IS_ENABLED(CONFIG_VMAP_STACK) &&
sp > (unsigned long)high_memory && high_memory != NULL))
return -EFAULT;

return 0;
@@ -121,8 +134,8 @@ static int verify_stack(unsigned long sp)
/*
* Dump out the contents of some memory nicely...
*/
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
unsigned long top)
void dump_mem(const char *lvl, const char *str, unsigned long bottom,
unsigned long top)
{
unsigned long first;
int i;
@@ -281,7 +294,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)

if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
THREAD_SIZE + (unsigned long)task_stack_page(tsk));
ALIGN(regs->ARM_sp - THREAD_SIZE, THREAD_ALIGN)
+ THREAD_SIZE);
dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
@@ -880,3 +894,70 @@ void __init early_trap_init(void *vectors_base)
*/
}
#endif

#ifdef CONFIG_VMAP_STACK

DECLARE_PER_CPU(u8 *, irq_stack_ptr);

asmlinkage DEFINE_PER_CPU(u8 *, overflow_stack_ptr);

static int __init allocate_overflow_stacks(void)
{
u8 *stack;
int cpu;

for_each_possible_cpu(cpu) {
stack = (u8 *)__get_free_page(GFP_KERNEL);
if (WARN_ON(!stack))
return -ENOMEM;
per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE];
}
return 0;
}
early_initcall(allocate_overflow_stacks);

asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
#ifdef CONFIG_IRQSTACKS
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
#endif
unsigned long ovf_stk = (unsigned long)this_cpu_read(overflow_stack_ptr);

console_verbose();
pr_emerg("Insufficient stack space to handle exception!");

pr_emerg("Task stack: [0x%08lx..0x%08lx]\n",
tsk_stk, tsk_stk + THREAD_SIZE);
#ifdef CONFIG_IRQSTACKS
pr_emerg("IRQ stack: [0x%08lx..0x%08lx]\n",
irq_stk - THREAD_SIZE, irq_stk);
#endif
pr_emerg("Overflow stack: [0x%08lx..0x%08lx]\n",
ovf_stk - OVERFLOW_STACK_SIZE, ovf_stk);

die("kernel stack overflow", regs, 0);
}

#ifndef CONFIG_ARM_LPAE
/*
* Normally, we rely on the logic in do_translation_fault() to update stale PMD
* entries covering the vmalloc space in a task's page tables when it first
* accesses the region in question. Unfortunately, this is not sufficient when
* the task stack resides in the vmalloc region, as do_translation_fault() is a
* C function that needs a stack to run.
*
* So we need to ensure that these PMD entries are up to date *before* the MM
* switch. As we already have some logic in the MM switch path that takes care
* of this, let's trigger it by bumping the counter every time the core vmalloc
* code modifies a PMD entry in the vmalloc region. Use release semantics on
* the store so that other CPUs observing the counter's new value are
* guaranteed to see the updated page table entries as well.
*/
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
if (start < VMALLOC_END && end > VMALLOC_START)
atomic_inc_return_release(&init_mm.context.vmalloc_seq);
}
#endif
#endif
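
The guard in arch_sync_kernel_mappings() is a plain interval-overlap test; a sketch with assumed VMALLOC bounds (the real values depend on the memory layout):

#include <assert.h>
#include <stdbool.h>

#define VMALLOC_START 0xf0000000UL /* assumed for illustration */
#define VMALLOC_END   0xff800000UL

static bool touches_vmalloc(unsigned long start, unsigned long end)
{
	return start < VMALLOC_END && end > VMALLOC_START;
}

int main(void)
{
	assert(touches_vmalloc(0xf1000000UL, 0xf1001000UL));  /* inside */
	assert(!touches_vmalloc(0xc0000000UL, 0xc0100000UL)); /* lowmem */
	return 0;
}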

@@ -33,6 +33,8 @@
#include <asm/traps.h>
#include <asm/unwind.h>

#include "reboot.h"

/* Dummy functions to avoid linker complaints */
void __aeabi_unwind_cpp_pr0(void)
{
@@ -53,6 +55,7 @@ struct unwind_ctrl_block {
unsigned long vrs[16]; /* virtual register set */
const unsigned long *insn; /* pointer to the current instructions word */
unsigned long sp_high; /* highest value of sp allowed */
unsigned long *lr_addr; /* address of LR value on the stack */
/*
* 1 : check for stack overflow for each register pop.
* 0 : save overhead if there is plenty of stack remaining.
@@ -237,6 +240,8 @@ static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
* from being tracked by KASAN.
*/
ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp));
if (reg == 14)
ctrl->lr_addr = *vsp;
(*vsp)++;
return URC_OK;
}
@@ -256,8 +261,9 @@ static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
mask >>= 1;
reg++;
}
if (!load_sp)
if (!load_sp) {
ctrl->vrs[SP] = (unsigned long)vsp;
}

return URC_OK;
}
@@ -313,9 +319,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)

if ((insn & 0xc0) == 0x00)
ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
else if ((insn & 0xc0) == 0x40)
else if ((insn & 0xc0) == 0x40) {
ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
else if ((insn & 0xf0) == 0x80) {
} else if ((insn & 0xf0) == 0x80) {
unsigned long mask;

insn = (insn << 8) | unwind_get_byte(ctrl);
@@ -330,9 +336,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if (ret)
goto error;
} else if ((insn & 0xf0) == 0x90 &&
(insn & 0x0d) != 0x0d)
(insn & 0x0d) != 0x0d) {
ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
else if ((insn & 0xf0) == 0xa0) {
} else if ((insn & 0xf0) == 0xa0) {
ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
if (ret)
goto error;
@@ -375,23 +381,22 @@ error:
*/
int unwind_frame(struct stackframe *frame)
{
unsigned long low;
const struct unwind_idx *idx;
struct unwind_ctrl_block ctrl;
unsigned long sp_low;

/* store the highest address on the stack to avoid crossing it*/
low = frame->sp;
ctrl.sp_high = ALIGN(low, THREAD_SIZE);
sp_low = frame->sp;
ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN)
+ THREAD_SIZE;

pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
frame->pc, frame->lr, frame->sp);

if (!kernel_text_address(frame->pc))
return -URC_FAILURE;

idx = unwind_find_idx(frame->pc);
if (!idx) {
pr_warn("unwind: Index not found %08lx\n", frame->pc);
if (frame->pc && kernel_text_address(frame->pc))
pr_warn("unwind: Index not found %08lx\n", frame->pc);
return -URC_FAILURE;
}
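
The reworked sp_high computation above recovers the stack top for any SP within a THREAD_ALIGN-aligned stack, including SP at the very base or the very top; a sketch assuming 8 KiB stacks aligned to 16 KiB (vmap'ed stack layout, values illustrative):

#include <assert.h>

#define THREAD_SIZE  (8UL * 1024)
#define THREAD_ALIGN (2 * THREAD_SIZE)
#define ALIGN(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long base = 0xf0808000UL; /* assumed THREAD_ALIGN-aligned */
	unsigned long sp;

	for (sp = base; sp <= base + THREAD_SIZE; sp += 4) {
		unsigned long sp_high = ALIGN(sp - THREAD_SIZE, THREAD_ALIGN)
					+ THREAD_SIZE;
		/* always the true top, even for sp == base */
		assert(sp_high == base + THREAD_SIZE);
	}
	return 0;
}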

@@ -403,7 +408,20 @@ int unwind_frame(struct stackframe *frame)
if (idx->insn == 1)
/* can't unwind */
return -URC_FAILURE;
else if ((idx->insn & 0x80000000) == 0)
else if (frame->pc == prel31_to_addr(&idx->addr_offset)) {
/*
* Unwinding is tricky when we're halfway through the prologue,
* since the stack frame that the unwinder expects may not be
* fully set up yet. However, one thing we do know for sure is
* that if we are unwinding from the very first instruction of
* a function, we are still effectively in the stack frame of
* the caller, and the unwind info has no relevance yet.
*/
if (frame->pc == frame->lr)
return -URC_FAILURE;
frame->pc = frame->lr;
return URC_OK;
} else if ((idx->insn & 0x80000000) == 0)
/* prel31 to the unwind table */
ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
else if ((idx->insn & 0xff000000) == 0x80000000)
@@ -430,6 +448,16 @@ int unwind_frame(struct stackframe *frame)

ctrl.check_each_pop = 0;

if (prel31_to_addr(&idx->addr_offset) == (u32)&call_with_stack) {
/*
* call_with_stack() is the only place where we permit SP to
* jump from one stack to another, and since we know it is
* guaranteed to happen, set up the SP bounds accordingly.
*/
sp_low = frame->fp;
ctrl.sp_high = ALIGN(frame->fp, THREAD_SIZE);
}

while (ctrl.entries > 0) {
int urc;
if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
@@ -437,7 +465,7 @@ int unwind_frame(struct stackframe *frame)
urc = unwind_exec_insn(&ctrl);
if (urc < 0)
return urc;
if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= ctrl.sp_high)
if (ctrl.vrs[SP] < sp_low || ctrl.vrs[SP] > ctrl.sp_high)
return -URC_FAILURE;
}

@@ -452,6 +480,7 @@ int unwind_frame(struct stackframe *frame)
frame->sp = ctrl.vrs[SP];
frame->lr = ctrl.vrs[LR];
frame->pc = ctrl.vrs[PC];
frame->lr_addr = ctrl.lr_addr;

return URC_OK;
}
@@ -475,7 +504,12 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)unwind_backtrace;
/* We are saving the stack and execution state at this
* point, so we should ensure that frame.pc is within
* this block of code.
*/
here:
frame.pc = (unsigned long)&&here;
} else {
/* task blocked in __switch_to */
frame.fp = thread_saved_fp(tsk);

@@ -138,12 +138,12 @@ SECTIONS
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
. = ALIGN(THREAD_SIZE);
. = ALIGN(THREAD_ALIGN);
#endif
__init_end = .;

_sdata = .;
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
_edata = .;

BSS_SECTION(0, 0, 0)

@@ -144,7 +144,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
*/
1003: ldr sv_lr, [sv_fp, #4] @ get saved lr from next frame

ldr r0, [sv_lr, #-4] @ get call instruction
1004: ldr r0, [sv_lr, #-4] @ get call instruction
ldr r3, .Lopcode+4
and r2, r3, r0 @ is this a bl call
teq r2, r3
@@ -164,7 +164,7 @@ finished_setup:
/*
* Print the function (sv_pc) and where it was called from (sv_lr).
*/
1004: mov r0, sv_pc
mov r0, sv_pc

mov r1, sv_lr
mov r2, frame
@@ -197,6 +197,14 @@ finished_setup:

cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
#ifdef CONFIG_IRQSTACKS
@
@ Kernel stacks may be discontiguous in memory. If the next
@ frame is below the previous frame, accept it as long as it
@ lives in kernel memory.
@
cmpls sv_fp, #PAGE_OFFSET
#endif
bhi for_each_frame

1006: adr r0, .Lbad
@@ -210,7 +218,7 @@ ENDPROC(c_backtrace)
.long 1001b, 1006b
.long 1002b, 1006b
.long 1003b, 1006b
.long 1004b, 1006b
.long 1004b, finished_setup
.long 1005b, 1006b
.popsection

@@ -98,6 +98,14 @@ for_each_frame: tst frame, mask @ Check for address exceptions

cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
#ifdef CONFIG_IRQSTACKS
@
@ Kernel stacks may be discontiguous in memory. If the next
@ frame is below the previous frame, accept it as long as it
@ lives in kernel memory.
@
cmpls sv_fp, #PAGE_OFFSET
#endif
bhi for_each_frame

1006: adr r0, .Lbad

@@ -8,25 +8,42 @@

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

/*
* void call_with_stack(void (*fn)(void *), void *arg, void *sp)
*
* Change the stack to that pointed at by sp, then invoke fn(arg) with
* the new stack.
*
* The sequence below follows the APCS frame convention for frame pointer
* unwinding, and implements the unwinder annotations needed by the EABI
* unwinder.
*/
ENTRY(call_with_stack)
str sp, [r2, #-4]!
str lr, [r2, #-4]!

ENTRY(call_with_stack)
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
mov ip, sp
push {fp, ip, lr, pc}
sub fp, ip, #4
#else
UNWIND( .fnstart )
UNWIND( .save {fpreg, lr} )
push {fpreg, lr}
UNWIND( .setfp fpreg, sp )
mov fpreg, sp
#endif
mov sp, r2
mov r2, r0
mov r0, r1

badr lr, 1f
ret r2
bl_r r2

1: ldr lr, [sp]
ldr sp, [sp, #4]
ret lr
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
ldmdb fp, {fp, sp, pc}
#else
mov sp, fpreg
pop {fpreg, pc}
UNWIND( .fnend )
#endif
ENDPROC(call_with_stack)

@@ -91,18 +91,15 @@
strb\cond \reg, [\ptr], #1
.endm

.macro enter reg1 reg2
.macro enter regs:vararg
mov r3, #0
stmdb sp!, {r0, r2, r3, \reg1, \reg2}
UNWIND( .save {r0, r2, r3, \regs} )
stmdb sp!, {r0, r2, r3, \regs}
.endm

.macro usave reg1 reg2
UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
.endm

.macro exit reg1 reg2
.macro exit regs:vararg
add sp, sp, #8
ldmfd sp!, {r0, \reg1, \reg2}
ldmfd sp!, {r0, \regs}
.endm

.text

@@ -69,13 +69,10 @@
* than one 32bit instruction in Thumb-2)
*/

UNWIND( .fnstart )
enter r4, lr
UNWIND( .fnend )

UNWIND( .fnstart )
usave r4, lr @ in first stmdb block
enter r4, UNWIND(fpreg,) lr
UNWIND( .setfp fpreg, sp )
UNWIND( mov fpreg, sp )

subs r2, r2, #4
blt 8f
@@ -86,12 +83,7 @@
bne 10f

1: subs r2, r2, #(28)
stmfd sp!, {r5 - r8}
UNWIND( .fnend )

UNWIND( .fnstart )
usave r4, lr
UNWIND( .save {r5 - r8} ) @ in second stmfd block
stmfd sp!, {r5, r6, r8, r9}
blt 5f

CALGN( ands ip, r0, #31 )
@@ -110,9 +102,9 @@
PLD( pld [r1, #92] )

3: PLD( pld [r1, #124] )
4: ldr8w r1, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
4: ldr8w r1, r3, r4, r5, r6, r8, r9, ip, lr, abort=20f
subs r2, r2, #32
str8w r0, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
str8w r0, r3, r4, r5, r6, r8, r9, ip, lr, abort=20f
bge 3b
PLD( cmn r2, #96 )
PLD( bge 4b )
@@ -132,8 +124,8 @@
ldr1w r1, r4, abort=20f
ldr1w r1, r5, abort=20f
ldr1w r1, r6, abort=20f
ldr1w r1, r7, abort=20f
ldr1w r1, r8, abort=20f
ldr1w r1, r9, abort=20f
ldr1w r1, lr, abort=20f

#if LDR1W_SHIFT < STR1W_SHIFT
@@ -150,17 +142,14 @@
str1w r0, r4, abort=20f
str1w r0, r5, abort=20f
str1w r0, r6, abort=20f
str1w r0, r7, abort=20f
str1w r0, r8, abort=20f
str1w r0, r9, abort=20f
str1w r0, lr, abort=20f

CALGN( bcs 2b )

7: ldmfd sp!, {r5 - r8}
UNWIND( .fnend ) @ end of second stmfd block
7: ldmfd sp!, {r5, r6, r8, r9}

UNWIND( .fnstart )
usave r4, lr @ still in first stmdb block
8: movs r2, r2, lsl #31
ldr1b r1, r3, ne, abort=21f
ldr1b r1, r4, cs, abort=21f
@@ -169,7 +158,7 @@
str1b r0, r4, cs, abort=21f
str1b r0, ip, cs, abort=21f

exit r4, pc
exit r4, UNWIND(fpreg,) pc

9: rsb ip, ip, #4
cmp ip, #2
@@ -189,13 +178,10 @@
ldr1w r1, lr, abort=21f
beq 17f
bgt 18f
UNWIND( .fnend )


.macro forward_copy_shift pull push

UNWIND( .fnstart )
usave r4, lr @ still in first stmdb block
subs r2, r2, #28
blt 14f

@@ -205,12 +191,8 @@
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )

11: stmfd sp!, {r5 - r9}
UNWIND( .fnend )
11: stmfd sp!, {r5, r6, r8 - r10}

UNWIND( .fnstart )
usave r4, lr
UNWIND( .save {r5 - r9} ) @ in new second stmfd block
PLD( pld [r1, #0] )
PLD( subs r2, r2, #96 )
PLD( pld [r1, #28] )
@@ -219,35 +201,32 @@
PLD( pld [r1, #92] )

12: PLD( pld [r1, #124] )
13: ldr4w r1, r4, r5, r6, r7, abort=19f
13: ldr4w r1, r4, r5, r6, r8, abort=19f
mov r3, lr, lspull #\pull
subs r2, r2, #32
ldr4w r1, r8, r9, ip, lr, abort=19f
ldr4w r1, r9, r10, ip, lr, abort=19f
orr r3, r3, r4, lspush #\push
mov r4, r4, lspull #\pull
orr r4, r4, r5, lspush #\push
mov r5, r5, lspull #\pull
orr r5, r5, r6, lspush #\push
mov r6, r6, lspull #\pull
orr r6, r6, r7, lspush #\push
mov r7, r7, lspull #\pull
orr r7, r7, r8, lspush #\push
orr r6, r6, r8, lspush #\push
mov r8, r8, lspull #\pull
orr r8, r8, r9, lspush #\push
mov r9, r9, lspull #\pull
orr r9, r9, ip, lspush #\push
orr r9, r9, r10, lspush #\push
mov r10, r10, lspull #\pull
orr r10, r10, ip, lspush #\push
mov ip, ip, lspull #\pull
orr ip, ip, lr, lspush #\push
str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, abort=19f
str8w r0, r3, r4, r5, r6, r8, r9, r10, ip, abort=19f
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )

ldmfd sp!, {r5 - r9}
UNWIND( .fnend ) @ end of the second stmfd block
ldmfd sp!, {r5, r6, r8 - r10}

UNWIND( .fnstart )
usave r4, lr @ still in first stmdb block
14: ands ip, r2, #28
beq 16f

@@ -262,7 +241,6 @@

16: sub r1, r1, #(\push / 8)
b 8b
UNWIND( .fnend )

.endm

@@ -273,6 +251,7 @@

18: forward_copy_shift pull=24 push=8

UNWIND( .fnend )

/*
* Abort preamble and completion macros.
@@ -282,13 +261,13 @@
*/

.macro copy_abort_preamble
19: ldmfd sp!, {r5 - r9}
19: ldmfd sp!, {r5, r6, r8 - r10}
b 21f
20: ldmfd sp!, {r5 - r8}
20: ldmfd sp!, {r5, r6, r8, r9}
21:
.endm

.macro copy_abort_end
ldmfd sp!, {r4, pc}
ldmfd sp!, {r4, UNWIND(fpreg,) pc}
.endm

@@ -90,18 +90,15 @@
strusr \reg, \ptr, 1, \cond, abort=\abort
.endm

.macro enter reg1 reg2
.macro enter regs:vararg
mov r3, #0
stmdb sp!, {r0, r2, r3, \reg1, \reg2}
UNWIND( .save {r0, r2, r3, \regs} )
stmdb sp!, {r0, r2, r3, \regs}
.endm

.macro usave reg1 reg2
UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
.endm

.macro exit reg1 reg2
.macro exit regs:vararg
add sp, sp, #8
ldmfd sp!, {r0, \reg1, \reg2}
ldmfd sp!, {r0, \regs}
.endm

.text

@@ -42,16 +42,13 @@
strb\cond \reg, [\ptr], #1
.endm

.macro enter reg1 reg2
stmdb sp!, {r0, \reg1, \reg2}
.macro enter regs:vararg
UNWIND( .save {r0, \regs} )
stmdb sp!, {r0, \regs}
.endm

.macro usave reg1 reg2
UNWIND( .save {r0, \reg1, \reg2} )
.endm

.macro exit reg1 reg2
ldmfd sp!, {r0, \reg1, \reg2}
.macro exit regs:vararg
ldmfd sp!, {r0, \regs}
.endm

.text

@@ -31,12 +31,13 @@ WEAK(memmove)
subs ip, r0, r1
cmphi r2, ip
bls __memcpy

stmfd sp!, {r0, r4, lr}
UNWIND( .fnend )

UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} ) @ in first stmfd block
UNWIND( .save {r0, r4, fpreg, lr} )
stmfd sp!, {r0, r4, UNWIND(fpreg,) lr}
UNWIND( .setfp fpreg, sp )
UNWIND( mov fpreg, sp )
add r1, r1, r2
add r0, r0, r2
subs r2, r2, #4
@@ -48,12 +49,7 @@ WEAK(memmove)
bne 10f

1: subs r2, r2, #(28)
stmfd sp!, {r5 - r8}
UNWIND( .fnend )

UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} )
UNWIND( .save {r5 - r8} ) @ in second stmfd block
stmfd sp!, {r5, r6, r8, r9}
blt 5f

CALGN( ands ip, r0, #31 )
@@ -72,9 +68,9 @@ WEAK(memmove)
PLD( pld [r1, #-96] )

3: PLD( pld [r1, #-128] )
4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
4: ldmdb r1!, {r3, r4, r5, r6, r8, r9, ip, lr}
subs r2, r2, #32
stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
stmdb r0!, {r3, r4, r5, r6, r8, r9, ip, lr}
bge 3b
PLD( cmn r2, #96 )
PLD( bge 4b )
@@ -88,8 +84,8 @@ WEAK(memmove)
W(ldr) r4, [r1, #-4]!
W(ldr) r5, [r1, #-4]!
W(ldr) r6, [r1, #-4]!
W(ldr) r7, [r1, #-4]!
W(ldr) r8, [r1, #-4]!
W(ldr) r9, [r1, #-4]!
W(ldr) lr, [r1, #-4]!

add pc, pc, ip
@@ -99,17 +95,13 @@ WEAK(memmove)
W(str) r4, [r0, #-4]!
W(str) r5, [r0, #-4]!
W(str) r6, [r0, #-4]!
W(str) r7, [r0, #-4]!
W(str) r8, [r0, #-4]!
W(str) r9, [r0, #-4]!
W(str) lr, [r0, #-4]!

CALGN( bcs 2b )

7: ldmfd sp!, {r5 - r8}
UNWIND( .fnend ) @ end of second stmfd block

UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
7: ldmfd sp!, {r5, r6, r8, r9}

8: movs r2, r2, lsl #31
ldrbne r3, [r1, #-1]!
@@ -118,7 +110,7 @@ WEAK(memmove)
strbne r3, [r0, #-1]!
strbcs r4, [r0, #-1]!
strbcs ip, [r0, #-1]
ldmfd sp!, {r0, r4, pc}
ldmfd sp!, {r0, r4, UNWIND(fpreg,) pc}

9: cmp ip, #2
ldrbgt r3, [r1, #-1]!
@@ -137,13 +129,10 @@ WEAK(memmove)
ldr r3, [r1, #0]
beq 17f
blt 18f
UNWIND( .fnend )


.macro backward_copy_shift push pull

UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
subs r2, r2, #28
blt 14f

@@ -152,12 +141,7 @@ WEAK(memmove)
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )

11: stmfd sp!, {r5 - r9}
UNWIND( .fnend )

UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} )
UNWIND( .save {r5 - r9} ) @ in new second stmfd block
11: stmfd sp!, {r5, r6, r8 - r10}

PLD( pld [r1, #-4] )
PLD( subs r2, r2, #96 )
@@ -167,35 +151,31 @@ WEAK(memmove)
PLD( pld [r1, #-96] )

12: PLD( pld [r1, #-128] )
13: ldmdb r1!, {r7, r8, r9, ip}
13: ldmdb r1!, {r8, r9, r10, ip}
mov lr, r3, lspush #\push
subs r2, r2, #32
ldmdb r1!, {r3, r4, r5, r6}
orr lr, lr, ip, lspull #\pull
mov ip, ip, lspush #\push
orr ip, ip, r9, lspull #\pull
orr ip, ip, r10, lspull #\pull
mov r10, r10, lspush #\push
orr r10, r10, r9, lspull #\pull
mov r9, r9, lspush #\push
orr r9, r9, r8, lspull #\pull
mov r8, r8, lspush #\push
orr r8, r8, r7, lspull #\pull
mov r7, r7, lspush #\push
orr r7, r7, r6, lspull #\pull
orr r8, r8, r6, lspull #\pull
mov r6, r6, lspush #\push
orr r6, r6, r5, lspull #\pull
mov r5, r5, lspush #\push
orr r5, r5, r4, lspull #\pull
mov r4, r4, lspush #\push
orr r4, r4, r3, lspull #\pull
stmdb r0!, {r4 - r9, ip, lr}
stmdb r0!, {r4 - r6, r8 - r10, ip, lr}
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )

ldmfd sp!, {r5 - r9}
UNWIND( .fnend ) @ end of the second stmfd block

UNWIND( .fnstart )
UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
ldmfd sp!, {r5, r6, r8 - r10}

14: ands ip, r2, #28
beq 16f
@@ -211,7 +191,6 @@ WEAK(memmove)

16: add r1, r1, #(\pull / 8)
b 8b
UNWIND( .fnend )

.endm

@@ -222,5 +201,6 @@ WEAK(memmove)

18: backward_copy_shift push=24 pull=8

UNWIND( .fnend )
ENDPROC(memmove)
ENDPROC(__memmove)

@ -28,16 +28,16 @@ UNWIND( .fnstart )
|
||||
mov r3, r1
|
||||
7: cmp r2, #16
|
||||
blt 4f
|
||||
UNWIND( .fnend )
|
||||
|
||||
#if ! CALGN(1)+0
|
||||
|
||||
/*
|
||||
* We need 2 extra registers for this loop - use r8 and the LR
|
||||
*/
|
||||
stmfd sp!, {r8, lr}
|
||||
UNWIND( .fnend )
|
||||
UNWIND( .fnstart )
|
||||
UNWIND( .save {r8, lr} )
|
||||
stmfd sp!, {r8, lr}
|
||||
mov r8, r1
|
||||
mov lr, r3
|
||||
|
||||
@ -66,10 +66,9 @@ UNWIND( .fnend )
|
||||
* whole cache lines at once.
|
||||
*/
|
||||
|
||||
stmfd sp!, {r4-r8, lr}
|
||||
UNWIND( .fnend )
|
||||
UNWIND( .fnstart )
|
||||
UNWIND( .save {r4-r8, lr} )
|
||||
stmfd sp!, {r4-r8, lr}
|
||||
mov r4, r1
|
||||
mov r5, r3
|
||||
mov r6, r1
|
||||
|
@ -40,6 +40,7 @@ obj-$(CONFIG_ARCH_BCM_MOBILE_L2_CACHE) += kona_l2_cache.o
|
||||
|
||||
# Support for secure monitor traps
|
||||
obj-$(CONFIG_ARCH_BCM_MOBILE_SMC) += bcm_kona_smc.o
|
||||
CFLAGS_REMOVE_bcm_kona_smc.o += $(CC_FLAGS_FTRACE)
|
||||
|
||||
# BCM2835
|
||||
ifeq ($(CONFIG_ARCH_BCM2835),y)
|
||||
|
@ -35,7 +35,6 @@ static bool secure_firmware __ro_after_init;
|
||||
*/
|
||||
#define exynos_v7_exit_coherency_flush(level) \
|
||||
asm volatile( \
|
||||
"stmfd sp!, {fp, ip}\n\t"\
|
||||
"mrc p15, 0, r0, c1, c0, 0 @ get SCTLR\n\t" \
|
||||
"bic r0, r0, #"__stringify(CR_C)"\n\t" \
|
||||
"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \
|
||||
@ -50,11 +49,10 @@ static bool secure_firmware __ro_after_init;
|
||||
"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR\n\t" \
|
||||
"isb\n\t" \
|
||||
"dsb\n\t" \
|
||||
"ldmfd sp!, {fp, ip}" \
|
||||
: \
|
||||
: "Ir" (pmu_base_addr + S5P_INFORM0) \
|
||||
: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
|
||||
"r9", "r10", "lr", "memory")
|
||||
: "r0", "r1", "r2", "r3", "r4", "r5", "r6", \
|
||||
"r9", "r10", "ip", "lr", "memory")
|
||||
|
||||
static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
|
||||
{
|
||||
|
@ -27,6 +27,91 @@
|
||||
|
||||
#include "common.h"
|
||||
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/irqs.h>
|
||||
#include <asm/hardware/dec21285.h>
|
||||
|
||||
static int dc21285_get_irq(void)
|
||||
{
|
||||
void __iomem *irqstatus = (void __iomem *)CSR_IRQ_STATUS;
|
||||
u32 mask = readl(irqstatus);
|
||||
|
||||
if (mask & IRQ_MASK_SDRAMPARITY)
|
||||
return IRQ_SDRAMPARITY;
|
||||
|
||||
if (mask & IRQ_MASK_UART_RX)
|
||||
return IRQ_CONRX;
|
||||
|
||||
if (mask & IRQ_MASK_DMA1)
|
||||
return IRQ_DMA1;
|
||||
|
||||
if (mask & IRQ_MASK_DMA2)
|
||||
return IRQ_DMA2;
|
||||
|
||||
if (mask & IRQ_MASK_IN0)
|
||||
return IRQ_IN0;
|
||||
|
||||
if (mask & IRQ_MASK_IN1)
|
||||
return IRQ_IN1;
|
||||
|
||||
if (mask & IRQ_MASK_IN2)
|
||||
return IRQ_IN2;
|
||||
|
||||
if (mask & IRQ_MASK_IN3)
|
||||
return IRQ_IN3;
|
||||
|
||||
if (mask & IRQ_MASK_PCI)
|
||||
return IRQ_PCI;
|
||||
|
||||
if (mask & IRQ_MASK_DOORBELLHOST)
|
||||
return IRQ_DOORBELLHOST;
|
||||
|
||||
if (mask & IRQ_MASK_I2OINPOST)
|
||||
return IRQ_I2OINPOST;
|
||||
|
||||
if (mask & IRQ_MASK_TIMER1)
|
||||
return IRQ_TIMER1;
|
||||
|
||||
if (mask & IRQ_MASK_TIMER2)
|
||||
return IRQ_TIMER2;
|
||||
|
||||
if (mask & IRQ_MASK_TIMER3)
|
||||
return IRQ_TIMER3;
|
||||
|
||||
if (mask & IRQ_MASK_UART_TX)
|
||||
return IRQ_CONTX;
|
||||
|
||||
if (mask & IRQ_MASK_PCI_ABORT)
|
||||
return IRQ_PCI_ABORT;
|
||||
|
||||
if (mask & IRQ_MASK_PCI_SERR)
|
||||
return IRQ_PCI_SERR;
|
||||
|
||||
if (mask & IRQ_MASK_DISCARD_TIMER)
|
||||
return IRQ_DISCARD_TIMER;
|
||||
|
||||
if (mask & IRQ_MASK_PCI_DPERR)
|
||||
return IRQ_PCI_DPERR;
|
||||
|
||||
if (mask & IRQ_MASK_PCI_PERR)
|
||||
return IRQ_PCI_PERR;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dc21285_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
int irq;
|
||||
do {
|
||||
irq = dc21285_get_irq();
|
||||
if (!irq)
|
||||
break;
|
||||
|
||||
generic_handle_irq(irq);
|
||||
} while (1);
|
||||
}
|
||||
|
||||
|
||||
unsigned int mem_fclk_21285 = 50000000;
|
||||
|
||||
EXPORT_SYMBOL(mem_fclk_21285);
|
||||
@ -108,6 +193,8 @@ static void __init __fb_init_irq(void)
|
||||
|
||||
void __init footbridge_init_irq(void)
|
||||
{
|
||||
set_handle_irq(dc21285_handle_irq);
|
||||
|
||||
__fb_init_irq();
|
||||
|
||||
if (!footbridge_cfn_mode())
@@ -1,107 +0,0 @@
/*
 * arch/arm/mach-footbridge/include/mach/entry-macro.S
 *
 * Low-level IRQ helper macros for footbridge-based platforms
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <asm/hardware/dec21285.h>

	.equ dc21285_high, ARMCSR_BASE & 0xff000000
	.equ dc21285_low, ARMCSR_BASE & 0x00ffffff

	.macro get_irqnr_preamble, base, tmp
	mov \base, #dc21285_high
	.if dc21285_low
	orr \base, \base, #dc21285_low
	.endif
	.endm

	.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
	ldr \irqstat, [\base, #0x180]	@ get interrupts

	mov \irqnr, #IRQ_SDRAMPARITY
	tst \irqstat, #IRQ_MASK_SDRAMPARITY
	bne 1001f

	tst \irqstat, #IRQ_MASK_UART_RX
	movne \irqnr, #IRQ_CONRX
	bne 1001f

	tst \irqstat, #IRQ_MASK_DMA1
	movne \irqnr, #IRQ_DMA1
	bne 1001f

	tst \irqstat, #IRQ_MASK_DMA2
	movne \irqnr, #IRQ_DMA2
	bne 1001f

	tst \irqstat, #IRQ_MASK_IN0
	movne \irqnr, #IRQ_IN0
	bne 1001f

	tst \irqstat, #IRQ_MASK_IN1
	movne \irqnr, #IRQ_IN1
	bne 1001f

	tst \irqstat, #IRQ_MASK_IN2
	movne \irqnr, #IRQ_IN2
	bne 1001f

	tst \irqstat, #IRQ_MASK_IN3
	movne \irqnr, #IRQ_IN3
	bne 1001f

	tst \irqstat, #IRQ_MASK_PCI
	movne \irqnr, #IRQ_PCI
	bne 1001f

	tst \irqstat, #IRQ_MASK_DOORBELLHOST
	movne \irqnr, #IRQ_DOORBELLHOST
	bne 1001f

	tst \irqstat, #IRQ_MASK_I2OINPOST
	movne \irqnr, #IRQ_I2OINPOST
	bne 1001f

	tst \irqstat, #IRQ_MASK_TIMER1
	movne \irqnr, #IRQ_TIMER1
	bne 1001f

	tst \irqstat, #IRQ_MASK_TIMER2
	movne \irqnr, #IRQ_TIMER2
	bne 1001f

	tst \irqstat, #IRQ_MASK_TIMER3
	movne \irqnr, #IRQ_TIMER3
	bne 1001f

	tst \irqstat, #IRQ_MASK_UART_TX
	movne \irqnr, #IRQ_CONTX
	bne 1001f

	tst \irqstat, #IRQ_MASK_PCI_ABORT
	movne \irqnr, #IRQ_PCI_ABORT
	bne 1001f

	tst \irqstat, #IRQ_MASK_PCI_SERR
	movne \irqnr, #IRQ_PCI_SERR
	bne 1001f

	tst \irqstat, #IRQ_MASK_DISCARD_TIMER
	movne \irqnr, #IRQ_DISCARD_TIMER
	bne 1001f

	tst \irqstat, #IRQ_MASK_PCI_DPERR
	movne \irqnr, #IRQ_PCI_DPERR
	bne 1001f

	tst \irqstat, #IRQ_MASK_PCI_PERR
	movne \irqnr, #IRQ_PCI_PERR
1001:
	.endm

@@ -7,7 +7,7 @@
#include <asm/traps.h>
#include <asm/ptrace.h>

static int cp6_trap(struct pt_regs *regs, unsigned int instr)
void iop_enable_cp6(void)
{
	u32 temp;

@@ -16,7 +16,15 @@ static int cp6_trap(struct pt_regs *regs, unsigned int instr)
		"mrc p15, 0, %0, c15, c1, 0\n\t"
		"orr %0, %0, #(1 << 6)\n\t"
		"mcr p15, 0, %0, c15, c1, 0\n\t"
		"mrc p15, 0, %0, c15, c1, 0\n\t"
		"mov %0, %0\n\t"
		"sub pc, pc, #4 @ cp_wait\n\t"
		: "=r"(temp));
}

static int cp6_trap(struct pt_regs *regs, unsigned int instr)
{
	iop_enable_cp6();

	return 0;
}
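
On iop32x, coprocessor 6 is the interrupt controller's register file and
is normally access-disabled: the first kernel access traps as an
undefined instruction, cp6_trap() enables access via iop_enable_cp6(),
and returning 0 makes the faulting instruction restart. The hook goes
through the standard undef-hook machinery; a sketch of that wiring (the
mask/value encodings here are illustrative, the real ones live in the
iop32x cp6 handler):

#include <asm/traps.h>

static struct undef_hook cp6_hook = {
	.instr_mask	= 0x0f000ff0,	/* illustrative cp6 mrc/mcr pattern */
	.instr_val	= 0x0e000610,	/* illustrative */
	.cpsr_mask	= MODE_MASK,
	.cpsr_val	= SVC_MODE,
	.fn		= cp6_trap,	/* enable cp6, return 0 => retry insn */
};

void __init iop_init_cp6_handler(void)
{
	register_undef_hook(&cp6_hook);
}
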
@@ -1,31 +0,0 @@
/*
 * arch/arm/mach-iop32x/include/mach/entry-macro.S
 *
 * Low-level IRQ helper macros for IOP32x-based platforms
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
	.macro get_irqnr_preamble, base, tmp
	mrc p15, 0, \tmp, c15, c1, 0
	orr \tmp, \tmp, #(1 << 6)
	mcr p15, 0, \tmp, c15, c1, 0	@ Enable cp6 access
	mrc p15, 0, \tmp, c15, c1, 0
	mov \tmp, \tmp
	sub pc, pc, #4			@ cp_wait
	.endm

	.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
	mrc p6, 0, \irqstat, c8, c0, 0	@ Read IINTSRC
	cmp \irqstat, #0
	clzne \irqnr, \irqstat
	rsbne \irqnr, \irqnr, #31
	.endm

	.macro arch_ret_to_user, tmp1, tmp2
	mrc p15, 0, \tmp1, c15, c1, 0
	ands \tmp2, \tmp1, #(1 << 6)
	bicne \tmp1, \tmp1, #(1 << 6)
	mcrne p15, 0, \tmp1, c15, c1, 0	@ Disable cp6 access
	.endm

@@ -9,6 +9,6 @@
#ifndef __IRQS_H
#define __IRQS_H

#define NR_IRQS 32
#define NR_IRQS 33

#endif

@@ -225,6 +225,7 @@ extern int iop3xx_get_init_atu(void);
#include <linux/reboot.h>

void iop3xx_map_io(void);
void iop_enable_cp6(void);
void iop_init_cp6_handler(void);
void iop_init_time(unsigned long tickrate);
void iop3xx_restart(enum reboot_mode, const char *);
@@ -29,17 +29,26 @@ static void intstr_write(u32 val)
	asm volatile("mcr p6, 0, %0, c4, c0, 0" : : "r" (val));
}

static u32 iintsrc_read(void)
{
	int irq;

	asm volatile("mrc p6, 0, %0, c8, c0, 0" : "=r" (irq));

	return irq;
}

static void
iop32x_irq_mask(struct irq_data *d)
{
	iop32x_mask &= ~(1 << d->irq);
	iop32x_mask &= ~(1 << (d->irq - 1));
	intctl_write(iop32x_mask);
}

static void
iop32x_irq_unmask(struct irq_data *d)
{
	iop32x_mask |= 1 << d->irq;
	iop32x_mask |= 1 << (d->irq - 1);
	intctl_write(iop32x_mask);
}

@@ -50,11 +59,25 @@ struct irq_chip ext_chip = {
	.irq_unmask = iop32x_irq_unmask,
};

static void iop_handle_irq(struct pt_regs *regs)
{
	u32 mask;

	iop_enable_cp6();

	do {
		mask = iintsrc_read();
		if (mask)
			generic_handle_irq(fls(mask));
	} while (mask);
}

void __init iop32x_init_irq(void)
{
	int i;

	iop_init_cp6_handler();
	set_handle_irq(iop_handle_irq);

	intctl_write(0);
	intstr_write(0);
@@ -65,7 +88,7 @@ void __init iop32x_init_irq(void)
	    machine_is_em7210())
		*IOP3XX_PCIIRSR = 0x0f;

	for (i = 0; i < NR_IRQS; i++) {
	for (i = 1; i < NR_IRQS; i++) {
		irq_set_chip_and_handler(i, &ext_chip, handle_level_irq);
		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
	}
@@ -7,36 +7,40 @@
#ifndef __IOP32X_IRQS_H
#define __IOP32X_IRQS_H

/* Interrupts in Linux start at 1, hardware starts at 0 */

#define IOP_IRQ(x) ((x) + 1)

/*
 * IOP80321 chipset interrupts
 */
#define IRQ_IOP32X_DMA0_EOT 0
#define IRQ_IOP32X_DMA0_EOC 1
#define IRQ_IOP32X_DMA1_EOT 2
#define IRQ_IOP32X_DMA1_EOC 3
#define IRQ_IOP32X_AA_EOT 6
#define IRQ_IOP32X_AA_EOC 7
#define IRQ_IOP32X_CORE_PMON 8
#define IRQ_IOP32X_TIMER0 9
#define IRQ_IOP32X_TIMER1 10
#define IRQ_IOP32X_I2C_0 11
#define IRQ_IOP32X_I2C_1 12
#define IRQ_IOP32X_MESSAGING 13
#define IRQ_IOP32X_ATU_BIST 14
#define IRQ_IOP32X_PERFMON 15
#define IRQ_IOP32X_CORE_PMU 16
#define IRQ_IOP32X_BIU_ERR 17
#define IRQ_IOP32X_ATU_ERR 18
#define IRQ_IOP32X_MCU_ERR 19
#define IRQ_IOP32X_DMA0_ERR 20
#define IRQ_IOP32X_DMA1_ERR 21
#define IRQ_IOP32X_AA_ERR 23
#define IRQ_IOP32X_MSG_ERR 24
#define IRQ_IOP32X_SSP 25
#define IRQ_IOP32X_XINT0 27
#define IRQ_IOP32X_XINT1 28
#define IRQ_IOP32X_XINT2 29
#define IRQ_IOP32X_XINT3 30
#define IRQ_IOP32X_HPI 31
#define IRQ_IOP32X_DMA0_EOT IOP_IRQ(0)
#define IRQ_IOP32X_DMA0_EOC IOP_IRQ(1)
#define IRQ_IOP32X_DMA1_EOT IOP_IRQ(2)
#define IRQ_IOP32X_DMA1_EOC IOP_IRQ(3)
#define IRQ_IOP32X_AA_EOT IOP_IRQ(6)
#define IRQ_IOP32X_AA_EOC IOP_IRQ(7)
#define IRQ_IOP32X_CORE_PMON IOP_IRQ(8)
#define IRQ_IOP32X_TIMER0 IOP_IRQ(9)
#define IRQ_IOP32X_TIMER1 IOP_IRQ(10)
#define IRQ_IOP32X_I2C_0 IOP_IRQ(11)
#define IRQ_IOP32X_I2C_1 IOP_IRQ(12)
#define IRQ_IOP32X_MESSAGING IOP_IRQ(13)
#define IRQ_IOP32X_ATU_BIST IOP_IRQ(14)
#define IRQ_IOP32X_PERFMON IOP_IRQ(15)
#define IRQ_IOP32X_CORE_PMU IOP_IRQ(16)
#define IRQ_IOP32X_BIU_ERR IOP_IRQ(17)
#define IRQ_IOP32X_ATU_ERR IOP_IRQ(18)
#define IRQ_IOP32X_MCU_ERR IOP_IRQ(19)
#define IRQ_IOP32X_DMA0_ERR IOP_IRQ(20)
#define IRQ_IOP32X_DMA1_ERR IOP_IRQ(21)
#define IRQ_IOP32X_AA_ERR IOP_IRQ(23)
#define IRQ_IOP32X_MSG_ERR IOP_IRQ(24)
#define IRQ_IOP32X_SSP IOP_IRQ(25)
#define IRQ_IOP32X_XINT0 IOP_IRQ(27)
#define IRQ_IOP32X_XINT1 IOP_IRQ(28)
#define IRQ_IOP32X_XINT2 IOP_IRQ(29)
#define IRQ_IOP32X_XINT3 IOP_IRQ(30)
#define IRQ_IOP32X_HPI IOP_IRQ(31)

#endif
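
The off-by-one scheme above exists because Linux IRQ 0 means "no
interrupt": hardware source bit n becomes Linux IRQ n + 1. That is also
why iop_handle_irq() can dispatch generic_handle_irq(fls(mask))
directly: fls() returns a 1-based bit position, which already matches
the IOP_IRQ() numbering. A standalone illustration (hypothetical test
program, not kernel code):

#include <stdio.h>

#define IOP_IRQ(x) ((x) + 1)

/* 1-based "find last set", same semantics as the kernel's fls() */
static int my_fls(unsigned int x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int mask = (1u << 9) | (1u << 3);	/* bits 9 and 3 pending */

	/* highest pending source is bit 9 -> Linux IRQ 10 == IOP_IRQ(9) */
	printf("dispatch Linux IRQ %d\n", my_fls(mask));
	printf("IOP_IRQ(9) == %d\n", IOP_IRQ(9));
	return 0;
}
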
@@ -2,10 +2,11 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#include <mach/entry-macro.S>

	.equ ioc_base_high, IOC_BASE & 0xff000000
	.equ ioc_base_low, IOC_BASE & 0x00ff0000

	.text

	.global rpc_default_fiq_end
ENTRY(rpc_default_fiq_start)
	mov r12, #ioc_base_high

@@ -1,13 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <mach/hardware.h>
#include <asm/hardware/entry-macro-iomd.S>

	.equ ioc_base_high, IOC_BASE & 0xff000000
	.equ ioc_base_low, IOC_BASE & 0x00ff0000

	.macro get_irqnr_preamble, base, tmp
	mov \base, #ioc_base_high	@ point at IOC
	.if ioc_base_low
	orr \base, \base, #ioc_base_low
	.endif
	.endm
@@ -14,6 +14,99 @@
#define CLR 0x04
#define MASK 0x08

static const u8 irq_prio_h[256] = {
	0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10,
	12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10,
	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10,
	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10,
	14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10,
	14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10,
	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10,
	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10,
	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10,
	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10,
	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10,
	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10,
	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10,
	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10,
	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10,
	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10,
};

static const u8 irq_prio_d[256] = {
	0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16,
};

static const u8 irq_prio_l[256] = {
	0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3,
	6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
};

static int iomd_get_irq_nr(void)
{
	int irq;
	u8 reg;

	/* get highest priority first */
	reg = readb(IOC_BASE + IOMD_IRQREQB);
	irq = irq_prio_h[reg];
	if (irq)
		return irq;

	/* get DMA */
	reg = readb(IOC_BASE + IOMD_DMAREQ);
	irq = irq_prio_d[reg];
	if (irq)
		return irq;

	/* get low priority */
	reg = readb(IOC_BASE + IOMD_IRQREQA);
	irq = irq_prio_l[reg];
	if (irq)
		return irq;
	return 0;
}

static void iomd_handle_irq(struct pt_regs *regs)
{
	int irq;

	do {
		irq = iomd_get_irq_nr();
		if (irq)
			generic_handle_irq(irq);
	} while (irq);
}

static void __iomem *iomd_get_base(struct irq_data *d)
{
	void *cd = irq_data_get_irq_chip_data(d);
@@ -82,6 +175,8 @@ void __init rpc_init_irq(void)
	set_fiq_handler(&rpc_default_fiq_start,
		&rpc_default_fiq_end - &rpc_default_fiq_start);

	set_handle_irq(iomd_handle_irq);

	for (irq = 0; irq < NR_IRQS; irq++) {
		clr = IRQ_NOREQUEST;
		set = 0;
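
The three 256-entry tables above replace the priority-encoding assembly
that lived in entry-macro-iomd.S: each table maps every possible value
of an 8-bit request register straight to the IRQ number of the
highest-priority bit that is set, so dispatch becomes one readb() plus
one table lookup. A sketch of how such a table could be generated,
assuming a list of (bit, irq) pairs ordered from highest to lowest
priority (hypothetical helper, not part of the diff):

/* Sketch: building a 256-entry priority lookup table. */
struct irq_src {
	unsigned int bit;	/* bit position in the request register */
	unsigned int irq;	/* IRQ number to dispatch for that bit */
};

static void gen_prio_table(const struct irq_src *prio, int n, u8 out[256])
{
	for (int val = 0; val < 256; val++) {
		out[val] = 0;			/* 0 == nothing pending */
		for (int i = 0; i < n; i++) {
			if (val & (1u << prio[i].bit)) {
				out[val] = prio[i].irq;
				break;		/* first hit wins */
			}
		}
	}
}
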
@@ -386,6 +386,7 @@ config CPU_V6
	select CPU_PABRT_V6
	select CPU_THUMB_CAPABLE
	select CPU_TLB_V6 if MMU
	select SMP_ON_UP if SMP

# ARMv6k
config CPU_V6K

@@ -90,7 +90,7 @@ ENDPROC(v7_flush_icache_all)
 *
 * Flush the D-cache up to the Level of Unification Inner Shareable
 *
 * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
 * Corrupted registers: r0-r6, r9-r10
 */

ENTRY(v7_flush_dcache_louis)
@@ -117,7 +117,7 @@ ENDPROC(v7_flush_dcache_louis)
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
 * Corrupted registers: r0-r6, r9-r10
 *
 * - mm - mm_struct describing address space
 */
@@ -149,22 +149,22 @@ flush_levels:
	movw r4, #0x3ff
	ands r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz r5, r4			@ find bit position of way size increment
	movw r7, #0x7fff
	ands r7, r7, r1, lsr #13	@ extract max number of the index size
	movw r6, #0x7fff
	and r1, r6, r1, lsr #13		@ extract max number of the index size
	mov r6, #1
	movne r4, r4, lsl r5		@ # of ways shifted into bits [31:...]
	movne r6, r6, lsl r5		@ 1 shifted left by same amount
loop1:
	mov r9, r7			@ create working copy of max index
	mov r9, r1			@ create working copy of max index
loop2:
ARM(	orr r11, r10, r4, lsl r5 )	@ factor way and cache number into r11
THUMB(	lsl r6, r4, r5 )
THUMB(	orr r11, r10, r6 )		@ factor way and cache number into r11
ARM(	orr r11, r11, r9, lsl r2 )	@ factor index number into r11
THUMB(	lsl r6, r9, r2 )
THUMB(	orr r11, r11, r6 )		@ factor index number into r11
	mcr p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
	mov r5, r9, lsl r2		@ factor set number into r5
	orr r5, r5, r4			@ factor way number into r5
	orr r5, r5, r10			@ factor cache level into r5
	mcr p15, 0, r5, c7, c14, 2	@ clean & invalidate by set/way
	subs r9, r9, #1			@ decrement the index
	bge loop2
	subs r4, r4, #1			@ decrement the way
	bge loop1
	subs r4, r4, r6			@ decrement the way
	bcs loop1
skip:
	add r10, r10, #2		@ increment cache number
	cmp r3, r10
@@ -192,14 +192,12 @@ ENDPROC(v7_flush_dcache_all)
 *
 */
ENTRY(v7_flush_kern_cache_all)
ARM(	stmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB(	stmfd sp!, {r4-r7, r9-r11, lr} )
	stmfd sp!, {r4-r6, r9-r10, lr}
	bl v7_flush_dcache_all
	mov r0, #0
	ALT_SMP(mcr p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
ARM(	ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB(	ldmfd sp!, {r4-r7, r9-r11, lr} )
	ldmfd sp!, {r4-r6, r9-r10, lr}
	ret lr
ENDPROC(v7_flush_kern_cache_all)

@@ -210,14 +208,12 @@ ENDPROC(v7_flush_kern_cache_all)
 * Invalidate the I-cache to the point of unification.
 */
ENTRY(v7_flush_kern_cache_louis)
ARM(	stmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB(	stmfd sp!, {r4-r7, r9-r11, lr} )
	stmfd sp!, {r4-r6, r9-r10, lr}
	bl v7_flush_dcache_louis
	mov r0, #0
	ALT_SMP(mcr p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
ARM(	ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB(	ldmfd sp!, {r4-r7, r9-r11, lr} )
	ldmfd sp!, {r4-r6, r9-r10, lr}
	ret lr
ENDPROC(v7_flush_kern_cache_louis)
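
The register shuffle in flush_levels frees up r7 and r11 (the Thumb2 and
ARM frame pointer registers) by composing the set/way operand in r5 step
by step instead of keeping pre-shifted copies live, so these functions
no longer need to save or clobber the frame pointer at all. In C, the
set/way iteration for one cache level looks roughly like this
(illustrative sketch; the parameters correspond to the CCSIDR fields the
assembly extracts):

/* Sketch of the set/way clean+invalidate loop for one cache level. */
static void clean_inv_level(unsigned int level2,	/* level << 1 */
			    unsigned int max_way, unsigned int way_shift,
			    unsigned int max_set, unsigned int set_shift)
{
	for (unsigned int way = 0; way <= max_way; way++) {
		for (unsigned int set = 0; set <= max_set; set++) {
			unsigned int val = (way << way_shift) |
					   (set << set_shift) | level2;

			/* DCCISW: data cache clean & invalidate by set/way */
			asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
		}
	}
}
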
@@ -240,8 +240,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);
	check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomicly with classic

@@ -117,16 +117,21 @@ EXPORT_SYMBOL(ioremap_page);

void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;
	int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		seq = atomic_read(&init_mm.context.vmalloc_seq);
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
		/*
		 * Use a store-release so that other CPUs that observe the
		 * counter's new value are guaranteed to see the results of the
		 * memcpy as well.
		 */
		atomic_set_release(&mm->context.vmalloc_seq, seq);
	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -157,7 +162,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
	 * Note: this is still racy on SMP machines.
	 */
	pmd_clear(pmdp);
	init_mm.context.vmalloc_seq++;
	atomic_inc_return_release(&init_mm.context.vmalloc_seq);

	/*
	 * Free the page table, if there was one.
@@ -174,8 +179,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);
	check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
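
The release on the writer side pairs with the reads on CPUs that consult
the counter: any CPU that observes the new vmalloc_seq value is also
guaranteed to observe the PGD entries copied by the memcpy(). The
consumer side of the pairing, the new check_vmalloc_seq() helper, is
presumably along these lines (a sketch reconstructed from the callers
above, not reproduced from the diff):

/* Sketch: reader that pairs with atomic_set_release() above. */
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);	/* copy kernel PGD entries */
}
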
@@ -84,7 +84,8 @@ emulate_generic_r0_12_noflags(probes_opcode_t insn,
	register void *rfn asm("lr") = asi->insn_fn;

	__asm__ __volatile__ (
		"stmdb sp!, {%[regs], r11} \n\t"
		ARM( "stmdb sp!, {%[regs], r11} \n\t" )
		THUMB( "stmdb sp!, {%[regs], r7} \n\t" )
		"ldmia %[regs], {r0-r12} \n\t"
#if __LINUX_ARM_ARCH__ >= 6
		"blx %[fn] \n\t"
@@ -96,10 +97,11 @@ emulate_generic_r0_12_noflags(probes_opcode_t insn,
#endif
		"ldr lr, [sp], #4 \n\t" /* lr = regs */
		"stmia lr, {r0-r12} \n\t"
		"ldr r11, [sp], #4 \n\t"
		ARM( "ldr r11, [sp], #4 \n\t" )
		THUMB( "ldr r7, [sp], #4 \n\t" )
		: [regs] "=r" (rregs), [fn] "=r" (rfn)
		: "0" (rregs), "1" (rfn)
		: "r0", "r2", "r3", "r4", "r5", "r6", "r7",
		: "r0", "r2", "r3", "r4", "r5", "r6", ARM("r7") THUMB("r11"),
		  "r8", "r9", "r10", "r12", "memory", "cc"
		);
}
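
The ARM()/THUMB() wrappers used in these asm bodies and clobber lists
emit their argument only when the kernel is built for the corresponding
instruction set, which is how one source file can treat r11 as the frame
pointer on ARM builds and r7 on Thumb2 builds. Simplified, the helpers
(see asm/unified.h) boil down to:

/* Simplified sketch of the dual-ISA helpers from asm/unified.h. */
#ifdef CONFIG_THUMB2_KERNEL
#define ARM(x...)
#define THUMB(x...)	x
#else
#define ARM(x...)	x
#define THUMB(x...)
#endif
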
@@ -447,14 +447,16 @@ t16_emulate_loregs(probes_opcode_t insn,

	__asm__ __volatile__ (
		"msr cpsr_fs, %[oldcpsr] \n\t"
		"mov r11, r7 \n\t"
		"ldmia %[regs], {r0-r7} \n\t"
		"blx %[fn] \n\t"
		"stmia %[regs], {r0-r7} \n\t"
		"mov r7, r11 \n\t"
		"mrs %[newcpsr], cpsr \n\t"
		: [newcpsr] "=r" (newcpsr)
		: [oldcpsr] "r" (oldcpsr), [regs] "r" (regs),
		  [fn] "r" (asi->insn_fn)
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r11",
		  "lr", "memory", "cc"
		);

@@ -524,14 +526,16 @@ t16_emulate_push(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	__asm__ __volatile__ (
		"mov r11, r7 \n\t"
		"ldr r9, [%[regs], #13*4] \n\t"
		"ldr r8, [%[regs], #14*4] \n\t"
		"ldmia %[regs], {r0-r7} \n\t"
		"blx %[fn] \n\t"
		"str r9, [%[regs], #13*4] \n\t"
		"mov r7, r11 \n\t"
		:
		: [regs] "r" (regs), [fn] "r" (asi->insn_fn)
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r8", "r9", "r11",
		  "lr", "memory", "cc"
		);
}
@@ -558,14 +562,16 @@ t16_emulate_pop_nopc(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	__asm__ __volatile__ (
		"mov r11, r7 \n\t"
		"ldr r9, [%[regs], #13*4] \n\t"
		"ldmia %[regs], {r0-r7} \n\t"
		"blx %[fn] \n\t"
		"stmia %[regs], {r0-r7} \n\t"
		"str r9, [%[regs], #13*4] \n\t"
		"mov r7, r11 \n\t"
		:
		: [regs] "r" (regs), [fn] "r" (asi->insn_fn)
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r9", "r11",
		  "lr", "memory", "cc"
		);
}
@@ -577,14 +583,16 @@ t16_emulate_pop_pc(probes_opcode_t insn,
	register unsigned long pc asm("r8");

	__asm__ __volatile__ (
		"mov r11, r7 \n\t"
		"ldr r9, [%[regs], #13*4] \n\t"
		"ldmia %[regs], {r0-r7} \n\t"
		"blx %[fn] \n\t"
		"stmia %[regs], {r0-r7} \n\t"
		"str r9, [%[regs], #13*4] \n\t"
		"mov r7, r11 \n\t"
		: "=r" (pc)
		: [regs] "r" (regs), [fn] "r" (asi->insn_fn)
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r9", "r11",
		  "lr", "memory", "cc"
		);

@@ -37,27 +37,14 @@

static struct irq_domain *nvic_irq_domain;

static void __nvic_handle_irq(irq_hw_number_t hwirq)
static void __irq_entry nvic_handle_irq(struct pt_regs *regs)
{
	unsigned long icsr = readl_relaxed(BASEADDR_V7M_SCB + V7M_SCB_ICSR);
	irq_hw_number_t hwirq = (icsr & V7M_SCB_ICSR_VECTACTIVE) - 16;

	generic_handle_domain_irq(nvic_irq_domain, hwirq);
}

/*
 * TODO: restructure the ARMv7M entry logic so that this entry logic can live
 * in arch code.
 */
asmlinkage void __exception_irq_entry
nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter();
	old_regs = set_irq_regs(regs);
	__nvic_handle_irq(hwirq);
	set_irq_regs(old_regs);
	irq_exit();
}

static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
@@ -143,6 +130,7 @@ static int __init nvic_of_init(struct device_node *node,
	for (i = 0; i < irqs; i += 4)
		writel_relaxed(0, nvic_base + NVIC_IPR + i);

	set_handle_irq(nvic_handle_irq);
	return 0;
}
IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init);
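
With v7-M now going through set_handle_irq() as well, the
irq_enter()/set_irq_regs() boilerplate deleted above moves into the
common entry code and the old TODO is resolved. The "- 16" survives
because on v7-M the ICSR VECTACTIVE field holds the active exception
number, and external interrupts start at exception 16. A standalone
illustration of that translation (hypothetical test program; the mask
value follows the v7-M architecture manual):

#include <stdio.h>

#define V7M_SCB_ICSR_VECTACTIVE	0x000001ff	/* ICSR bits [8:0] */

int main(void)
{
	unsigned long icsr = 0x2a;	/* pretend ICSR read: exception 42 */
	unsigned long hwirq = (icsr & V7M_SCB_ICSR_VECTACTIVE) - 16;

	printf("hwirq = %lu\n", hwirq);	/* 42 - 16 = 26 */
	return 0;
}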