/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains ARM architecture-specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define IOMEM(x)	(x)
/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
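/*
 * Illustrative sketch (not part of the original header): a misaligned
 * copy loop merges two words with lspull/lspush, and get_byte_N extracts
 * byte N of a word regardless of endianness:
 *
 *	mov	r3, r3, lspull #8		@ low part of misaligned word
 *	orr	r3, r3, r4, lspush #24		@ merge in the high part
 *	mov	r0, r2, get_byte_1		@ shift byte 1 of r2 down
 *	and	r0, r0, #255			@ mask it to a single byte
 */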
/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif
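/*
 * Illustrative use: byte-swap a value only on BE8 kernels, e.g. after
 * loading a little-endian instruction word:
 *
 * ARM_BE8(rev	r0, r0)			@ little endian instruction
 */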
/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
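/*
 * Illustrative use: prefetch the source buffer in a copy loop; the
 * instruction disappears entirely on pre-v5 builds:
 *
 * PLD(	pld	[r1, #0]	)
 */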
/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be a worthwhile thing to do when the cache
 * is not set to write-allocate (this would need further testing on
 * XScale when WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif
	.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
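/*
 * Illustrative pairing (hypothetical surrounding code): bracket a short
 * critical section; the _notrace variants skip the lockdep callouts.
 *
 *	disable_irq
 *	@ ... code that must not be interrupted ...
 *	enable_irq
 */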
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
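/*
 * Illustrative pairing (hypothetical surrounding code): save the IRQ
 * state into a scratch register, then restore it when done.
 *
 *	save_and_disable_irqs ip
 *	@ ... critical section ...
 *	restore_irqs ip
 */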
/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
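/*
 * Illustrative use (as in safe_svcmode_maskall below): load the address
 * of a local label into lr so that a later return works in both ARM and
 * Thumb-2 kernels (Thumb-2 needs bit 0 set, which badr provides):
 *
 *	badr	lr, 2f
 */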
/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm
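/*
 * Illustrative sketch (TI_FLAGS comes from asm-offsets): fetch the
 * current task's thread_info and test its flags.
 *
 *	get_thread_info r9
 *	ldr	r8, [r9, #TI_FLAGS]
 */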
/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif
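/*
 * Illustrative pairing (hypothetical register choices): bump the preempt
 * count around a non-preemptible region; the macros expand to nothing
 * when CONFIG_PREEMPT_COUNT is off.
 *
 *	get_thread_info r10
 *	inc_preempt_count r10, r4
 *	@ ... non-preemptible work ...
 *	dec_preempt_count r10, r4
 */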
#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection
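/*
 * Illustrative use (hypothetical fixup code): the code around USER()
 * must provide the local label 9001 that the exception table entry
 * points at.
 *
 * USER(	ldrt	r3, [r0]	)	@ a fault redirects to 9001
 *	...
 * 9001:	@ fixup code, e.g. return -EFAULT to the caller
 */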
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)			\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP(W(instr)...)
 */
#define ALT_UP(instr...)			\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
9997:	instr					;\
	.if . - 9997b == 2			;\
		nop				;\
	.endif					;\
	.if . - 9997b != 4			;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif					;\
	.popsection
#define ALT_UP_B(label)				\
	.equ	up_b_offset, label - 9998b	;\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
	W(b)	. + up_b_offset			;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
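/*
 * Illustrative pairing (taken from the smp_dmb macro below): emit the
 * SMP instruction, and record a UP replacement that boot code patches in
 * when running on a uniprocessor system.  Both sides must assemble to
 * exactly 4 bytes.
 *
 *	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
 *	ALT_UP(nop)
 */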
/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm
/*
* SMP data memory barrier
*/
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
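/*
 * Illustrative use: "smp_dmb arm" assumes 4-byte ARM encodings (e.g. for
 * the kuser helpers in the vectors page), while plain "smp_dmb" uses W()
 * so the ALT_SMP() slot is 4 bytes in Thumb-2 code too.
 *
 *	smp_dmb	arm
 */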
#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during boot.
	 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
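/*
 * Illustrative use (as in the pre-v6 fallback below): force SVC mode
 * with IRQs and FIQs masked, using a scratch register:
 *
 *	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
 */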
/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg, cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg, \reg, #MODE_MASK
	orr	\reg, \reg, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg, \reg, #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
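/*
 * Illustrative use: early boot code invokes the macro with a scratch
 * register; entered in HYP mode it drops to SVC via exception return,
 * otherwise it writes CPSR directly.
 *
 *	safe_svcmode_maskall r9
 */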
/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm
#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
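/*
 * Illustrative use (hypothetical abort label): store r3 to user memory
 * at [r0], post-incrementing by 4, with faults redirected to a local
 * fixup label provided by the caller:
 *
 *	strusr	r3, r0, 4, abort=9001f
 */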
/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
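/*
 * Illustrative use (hypothetical symbol name): declares a sized, typed
 * string object:
 *
 *	string	my_name, "example"
 */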
	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcccs	\tmp, \tmp, \limit
	bcs	\bad
#endif
	.endm
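/*
 * Illustrative use (hypothetical registers and label): verify that a
 * 4-byte access at r0 stays below the limit in r1, using r2 as scratch
 * and branching to a bad-access handler otherwise:
 *
 *	check_uaccess r0, 4, r1, r2, .Lbad_access
 */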
	.macro	uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	mrc	p15, 0, \tmp, c3, c0, 0
	str	\tmp, [sp, #S_FRAME_SIZE]
#endif
	.endm

	.macro	uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	ldr	r0, [sp, #S_FRAME_SIZE]
	mcr	p15, 0, r0, c3, c0, 0
#endif
	.endm
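/*
 * Illustrative pairing (hypothetical surrounding code): open the
 * userspace window around explicit ldrt/strt accesses, then close it
 * again; both expand to nothing without CONFIG_CPU_SW_DOMAIN_PAN.
 *
 *	uaccess_enable r3
 *	@ ... ldrt/strt user accesses ...
 *	uaccess_disable r3
 */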
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm
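/*
 * Illustrative use: "ret lr" assembles to "bx lr" on >= v6 (needed for
 * ARM/Thumb interworking) and to "mov pc, lr" on older cores; the
 * conditional forms such as "reteq" come from the .irp expansion above.
 *
 *	reteq	lr
 *	ret	lr
 */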
#endif /* __ASM_ASSEMBLER_H__ */