/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define pull		lsr
#define push		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define pull		lsl
#define push		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
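
/*
 * Usage sketch (illustrative only; the register choices are hypothetical):
 * extract byte 1 of the word in r0 into r3, independent of endianness:
 *
 *	mov	r3, r0, get_byte_1
 *	and	r3, r3, #255
 */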
/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
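
/*
 * Illustrative use in a copy loop (register and offset are hypothetical);
 * the PLD() wrapper compiles to nothing on pre-v5 architectures:
 *
 *	PLD(	pld	[r1, #32]	)
 */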
/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when
 * WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
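
/*
 * A minimal sketch of how a copy template might use this (registers are
 * illustrative): compute the byte count needed to cacheline align the
 * destination in r0, with the sequence omitted on non-Feroceon builds:
 *
 *	CALGN(	ands	ip, r0, #31	)
 *	CALGN(	rsb	ip, ip, #32	)
 */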
/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq
	cpsid	i
	.endm

	.macro	enable_irq
	cpsie	i
	.endm
#else
	.macro	disable_irq
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq
	msr	cpsr_c, #SVC_MODE
	.endm
#endif
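
/*
 * Typical use around a short critical section (sketch only; the pre-v6
 * variant assumes the processor is in SVC mode):
 *
 *	disable_irq
 *	@ ... code that must not be interrupted ...
 *	enable_irq
 */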
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm
/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm
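
/*
 * Example pairing (sketch; r9 is an arbitrary scratch register here):
 *
 *	save_and_disable_irqs r9
 *	@ ... critical section ...
 *	restore_irqs r9
 */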
#define USER(x...)				\
9999:	x;					\
	.section __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.previous
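
/*
 * Usage sketch: wrap a user-space access so that a fault branches to a
 * local fixup at a 9001 label the caller provides (the surrounding code
 * here is hypothetical):
 *
 * USER(	ldrt	r0, [r1]	)
 *	...
 * 9001:	@ fault fixup code runs here
 */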
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	dmb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
#endif
	.endm
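
/*
 * Illustrative use in an atomic sequence (sketch; registers are
 * hypothetical): barriers surround a load/store-exclusive loop so the
 * update is ordered with respect to other CPUs:
 *
 *	smp_dmb
 * 1:	ldrex	r0, [r2]
 *	add	r0, r0, r1
 *	strex	r3, r0, [r2]
 *	teq	r3, #0
 *	bne	1b
 *	smp_dmb
 */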
#ifdef CONFIG_THUMB2_KERNEL
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
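
/*
 * Example (sketch): mask IRQs and FIQs and enter SVC mode, with r9 as
 * the scratch register the Thumb-2 variant needs:
 *
 *	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
 */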