#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <asm/memory.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9
/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
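
/*
 * Illustrative sketch, not part of the original header: __asmeq() is meant
 * to be pasted in front of an inline asm body whose operands are pinned to
 * specific registers.  The variable and helper names below are hypothetical;
 * the point is that a mismatch between "%0" and "r0" fails the build rather
 * than silently using the wrong register:
 *
 *	register unsigned long r0 asm("r0") = arg;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "bl	some_helper"
 *		     : "+r" (r0) : : "lr", "cc");
 */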
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/irqflags.h>

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)				\
		    :						\
		    : "cc");					\
		__val;						\
	})
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#endif

/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or read_cpuid() directly.
 */
static inline unsigned int read_cpuid_id(void) __attribute_const__;

static inline unsigned int read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}
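
/*
 * Illustrative sketch, not part of the original header: the main ID
 * register keeps the implementer code in bits [31:24] (0x41 for ARM Ltd),
 * so a caller might test for it like the hypothetical helper below.
 */
static inline int example_cpuid_is_arm_ltd(void)
{
	/* extract MIDR[31:24] and compare with the 'A' implementer code */
	return (read_cpuid_id() >> 24) == 0x41;
}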

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define	cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifndef CONFIG_SMP
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define mb()		dmb()
#define rmb()		dmb()
#define wmb()		dmb()
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
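
/*
 * Illustrative sketch, not part of the original header: a typical
 * publish/consume pairing built on the barriers above.  The helper names
 * and the data/flag layout are hypothetical; the point is that smp_wmb()
 * orders the data store before the flag store, and smp_rmb() on the
 * reader side orders the flag load before the data load.
 */
static inline void example_publish(int *data, int *flag, int value)
{
	*data = value;
	smp_wmb();	/* data must be visible before the flag is set */
	*flag = 1;
}

static inline int example_consume(int *data, int *flag)
{
	if (*flag == 0)
		return -1;	/* nothing published yet */
	smp_rmb();	/* order the flag load before the data load */
	return *data;
}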

#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif
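
/*
 * Illustrative sketch, not part of the original header: the CR_* bits
 * defined above combine with get_cr()/set_cr() in the obvious way.  The
 * hypothetical helper below would turn on alignment fault checking; real
 * callers should normally go through adjust_cr() so that the cached
 * cr_alignment value stays in sync.
 */
static inline void example_enable_alignment_faults(void)
{
	set_cr(get_cr() | CR_A);	/* CR_A: alignment abort enable */
}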

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
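
/*
 * Illustrative sketch, not part of the original header: granting full
 * (user and kernel) access to coprocessors 10 and 11, which hold the
 * VFP register file, using the accessors above.  The helper name is
 * hypothetical.
 */
static inline void example_enable_vfp_access(void)
{
	set_copro_access(get_copro_access() | CPACC_FULL(10) | CPACC_FULL(11));
}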

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev, task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}
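
/*
 * Illustrative sketch, not part of the original header: xchg() returns
 * the value that was previously in memory, so a trivial test-and-set
 * style helper might look like the hypothetical function below (real
 * code should use the proper spinlock/atomic primitives instead).
 */
static inline int example_test_and_set(volatile unsigned long *lock)
{
	/* returns non-zero if the lock word was already set */
	return xchg(lock, 1UL) != 0;
}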

extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
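
/*
 * Illustrative sketch, not part of the original header: cmpxchg_local()
 * returns the value that was found in memory, so a successful update is
 * detected by comparing against the expected old value.  The helper name
 * is hypothetical.
 */
static inline int example_bump_if_equal(unsigned long *p, unsigned long old)
{
	/* returns 1 if *p held 'old' and has now been incremented */
	return cmpxchg_local(p, old, old + 1) == old;
}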

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif