/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 */
# ifndef _XTENSA_PROCESSOR_H
# define _XTENSA_PROCESSOR_H
#include <variant/core.h>
#include <platform/hardware.h>

#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/regs.h>
/* Assertions. */

#if (XCHAL_HAVE_WINDOWED != 1)
# error Linux requires the Xtensa Windowed Registers Option.
#endif

/* Slab allocations must respect the core's natural data width. */
#define ARCH_SLAB_MINALIGN	XCHAL_DATA_WIDTH
/*
 * User space process size: 1 GB.
 * Windowed call ABI requires caller and callee to be located within the same
 * 1 GB region. The C compiler places trampoline code on the stack for sources
 * that take the address of a nested C function (a feature used by glibc), so
 * the 1 GB requirement applies to the stack as well.
 */
#ifdef CONFIG_MMU
#define TASK_SIZE	__XTENSA_UL_CONST(0x40000000)
#else
/* No MMU: the whole 4 GB address space is available to the task. */
#define TASK_SIZE	__XTENSA_UL_CONST(0xffffffff)
#endif

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP
/*
 * General exception cause assigned to fake NMI. Fake NMI needs to be handled
 * differently from other interrupts, but it uses common kernel entry/exit
 * code.
 */
#define EXCCAUSE_MAPPED_NMI	62

/*
 * General exception cause assigned to debug exceptions. Debug exceptions go
 * to their own vector, rather than the general exception vectors (user,
 * kernel, double); and their specific causes are reported via DEBUGCAUSE
 * rather than EXCCAUSE. However it is sometimes convenient to redirect debug
 * exceptions to the general exception mechanism. To do this, an otherwise
 * unused EXCCAUSE value was assigned to debug exceptions for this purpose.
 */
#define EXCCAUSE_MAPPED_DEBUG	63

/*
 * We use DEPC also as a flag to distinguish between double and regular
 * exceptions. For performance reasons, DEPC might contain the value of
 * EXCCAUSE for regular exceptions, so we use this definition to mark a
 * valid double exception address.
 * (Note: We use it in bgeui, so it should be 64, 128, or 256)
 */
#define VALID_DOUBLE_EXCEPTION_ADDRESS	64

/* Two-level expansion so that a macro argument is expanded before pasting. */
#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL

#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)

#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)

#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
/* LOCKLEVEL defines the interrupt level that masks all
 * general-purpose interrupts.
 */
#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
/* Leave the profiling interrupt above LOCKLEVEL so it acts as a fake NMI. */
#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
#else
#define LOCKLEVEL XCHAL_EXCM_LEVEL
#endif

#define TOPLEVEL XCHAL_EXCM_LEVEL
/* True when at least one interrupt level sits above LOCKLEVEL. */
#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)

/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
 * registers
 */
#define WSBITS (XCHAL_NUM_AREGS / 4)      /* width of WINDOWSTART in bits */
#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
# ifndef __ASSEMBLY__
/* Build a valid return address for the specified call winsize.
 * winsize must be 1 (call4), 2 (call8), or 3 (call12).
 * The window-call size is encoded in the top two bits of the address.
 */
# define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
/* Convert return address to a valid pc by replacing the top two bits
 * (the call-size encoding) with the 1 GB region bits taken from sp.
 * Note: We assume that the stack pointer is in the same 1 GB range as the ra.
 */
# define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
/* Address-space segment descriptor; see asm/uaccess.h for how it is used. */
typedef struct {
	unsigned long seg;
} mm_segment_t;
/* Per-task architecture-specific state saved across context switches. */
struct thread_struct {

	/* kernel's return address and stack pointer for context switching */
	unsigned long ra; /* kernel's a0: return address and window call size */
	unsigned long sp; /* kernel's a1: stack pointer */

	mm_segment_t current_ds;    /* see uaccess.h for example uses */

	/* struct xtensa_cpuinfo info; */

	unsigned long bad_vaddr; /* last user fault */
	unsigned long bad_uaddr; /* last kernel fault accessing user space */
	unsigned long error_code;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* ptrace-installed hardware instruction/data breakpoints */
	struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
	struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
#endif
	/* Make structure 16 bytes aligned. */
	int align[0] __attribute__ ((aligned(16)));
};
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter"), using a GNU C
 * local label and the address-of-label extension.
 */
# define current_text_addr() ({ __label__ _l; _l: &&_l;})

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's: halfway through the user address space.
 */
# define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
/*
 * Initial thread state for the init task: no saved return address yet,
 * kernel stack pointer at the top of init_stack, zeroed data segment
 * and fault bookkeeping.
 */
#define INIT_THREAD						\
{								\
	.ra		= 0,					\
	.sp		= sizeof(init_stack) + (long) &init_stack, \
	.current_ds	= { 0 },				\
	/* .info = { 0 }, */					\
	.bad_vaddr	= 0,					\
	.bad_uaddr	= 0,					\
	.error_code	= 0,					\
}
/*
 * Do necessary setup to start up a newly executed thread.
 * Note: We set up ps as if we did a call4 to the new pc.
 *       set_thread_state in signal.c depends on it.
 */
#define USER_PS_VALUE ((1 << PS_WOE_BIT) |		\
		       (1 << PS_CALLINC_SHIFT) |	\
		       (USER_RING << PS_RING_SHIFT) |	\
		       (1 << PS_UM_BIT) |		\
		       (1 << PS_EXCM_BIT))
/* Clearing a0 terminates the backtrace. */
/* Wrapped in do { } while (0) so the multi-statement macro behaves as a
 * single statement in all contexts (e.g. unbraced if/else). */
#define start_thread(regs, new_pc, new_sp)				\
	do {								\
		memset(regs, 0, sizeof(*regs));				\
		regs->pc = new_pc;					\
		regs->ps = USER_PS_VALUE;				\
		regs->areg[1] = new_sp;					\
		regs->areg[0] = 0;					\
		regs->wmask = 1;					\
		regs->depc = 0;						\
		regs->windowbase = 0;					\
		regs->windowstart = 1;					\
	} while (0)
/* Forward declaration */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)

/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm)	do { } while(0)
#define release_segments(mm)	do { } while(0)
#define forget_segments()	do { } while (0)

/* Saved user pc lives in the pt_regs at the top of the kernel stack. */
#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->areg[1])

#define cpu_relax()  barrier()
/* Special register access. */

#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));

#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })

/* Older core headers may not define this option macro; default to "absent". */
#ifndef XCHAL_HAVE_EXTERN_REGS
#define XCHAL_HAVE_EXTERN_REGS 0
#endif
#if XCHAL_HAVE_EXTERN_REGS

/* Write an external (WER-accessible) register. */
static inline void set_er(unsigned long value, unsigned long addr)
{
	asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
}

/* Read an external (RER-accessible) register. */
static inline unsigned long get_er(unsigned long addr)
{
	register unsigned long value;

	asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
	return value;
}

#endif /* XCHAL_HAVE_EXTERN_REGS */
# endif /* __ASSEMBLY__ */
# endif /* _XTENSA_PROCESSOR_H */