#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})
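
/*
 * Illustrative use (not part of the original header): the argument is
 * stringified into the last (opcode 2) field of the MRC above, so the
 * main ID register, for example, can be read as
 *
 *	unsigned int id = read_cpuid(CPUID_ID);
 */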
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
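
/*
 * Illustrative use (a hypothetical caller, not from the original header;
 * "arg" and "helper_taking_r0" are placeholders): pair an asm operand with
 * the register it is expected to live in, e.g.
 *
 *	register unsigned long r0 asm("r0") = arg;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "bl	helper_taking_r0"
 *		     : : "r" (r0) : "lr", "cc");
 *
 * If the compiler allocates anything other than r0 for %0, the generated
 * ".ifnc"/".err" directives make the assembler reject the file.
 */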
#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));
struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
					struct pt_regs *),
		     int sig, const char *name);

#include <asm/proc-fns.h>

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
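
/*
 * Illustrative use (not from the original header; "lock_word" is a
 * hypothetical variable): xchg() atomically stores the new value and
 * returns the previous contents, so a crude test-and-set acquisition is
 *
 *	while (xchg(&lock_word, 1) != 0)
 *		;		(spin: the old value was still 1)
 *
 * tas(ptr) is simply shorthand for xchg(ptr, 1).
 */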
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);
#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})
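
/*
 * Illustrative use (not from the original header): the CR_* bits above
 * are intended to be combined with these accessors, e.g. turning on
 * alignment fault checking:
 *
 *	set_cr(get_cr() | CR_A);
 */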
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev,next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,prev->thread_info,next->thread_info);	\
} while (0)
/*
 * CPU interrupt mask handling.
*/
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
	"cpsid	i"						\
	: "=r" (x) : : "memory", "cc");				\
	})

#define local_irq_enable()  __asm__("cpsie i	@ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i	@ __cli" : : : "memory", "cc")
#define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")
#else

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
		(void) (&temp == &x);				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory", "cc");					\
	})
/*
 * Enable IRQs
 */
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})
/*
 * Disable IRQs
 */
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})
/*
 * Enable FIQs
 */
#define local_fiq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})
/*
 * Disable FIQs
 */
#define local_fiq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

#endif
/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags"	\
	: "=r" (x) : : "memory", "cc");				\
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory", "cc")
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(int)(flags & PSR_I_BIT);	\
})

#ifdef CONFIG_SMP
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()			barrier()
#define smp_rmb()			barrier()
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#ifdef CONFIG_SMP
#error SMP is not supported on SA1100/SA110
#else
#define	swp_is_buggy
#endif
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
		case 1:
			local_irq_save(flags);
			ret = *(volatile unsigned char *)ptr;
			*(volatile unsigned char *)ptr = x;
			local_irq_restore(flags);
			break;

		case 4:
			local_irq_save(flags);
			ret = *(volatile unsigned long *)ptr;
			*(volatile unsigned long *)ptr = x;
			local_irq_restore(flags);
			break;
#else
		case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory", "cc");
			break;
		case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory", "cc");
			break;
#endif
		default: __bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif