/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
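
/*
 * STATMASK covers the mode/interrupt bits at the low end of c0_status:
 * the three-deep KUo/IEo/KUp/IEp/KUc/IEc stack (bits 5..0, i.e. 0x3f)
 * on R3000-style CPUs, and KSU/ERL/EXL/IE (bits 4..0, i.e. 0x1f) on
 * R4000-style CPUs.
 */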

		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions. It must be saved here, before
		 * general kernel code runs and might corrupt it.
		 */
		jal	octeon_mult_save
#endif
		.endm
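
/*
 * A note on the SmartMIPS path above: mflhxu pops the bottom of the
 * {ACX, HI, LO} accumulator stack (it returns LO, then shifts HI into
 * LO and ACX into HI), so three reads save LO, HI and ACX in turn;
 * RESTORE_TEMP below pushes them back with three mtlhx writes in the
 * opposite order.
 */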

		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

#ifdef CONFIG_SMP
		.macro	get_saved_sp	/* SMP variation */
		ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, SMP_CPUID_PTRSHIFT
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
		LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
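
/*
 * The %highest/%higher/%hi/%lo sequence above assembles the 64-bit
 * address of kernelsp 16 bits at a time, since each immediate is only
 * 16 bits wide (the operators extract, roughly, successive 16-bit
 * fields of the symbol's address, with carry adjustment):
 *
 *	lui	k1, %highest(kernelsp)	# bits 63..48
 *	daddiu	k1, %higher(kernelsp)	# + bits 47..32
 *	dsll	k1, 16
 *	daddiu	k1, %hi(kernelsp)	# + bits 31..16
 *	dsll	k1, 16			# %lo() is folded into the load
 *
 * The result is then indexed by the shifted CPU id, so the final
 * LONG_L fetches this CPU's entry of the kernelsp[] array.
 */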

#else /* !CONFIG_SMP */
		.macro	get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
		/*
		 * Clear the BTB (branch target buffer) and forbid the RAS
		 * (return address stack) to work around the out-of-order
		 * issue in Loongson2F via its diagnostic register.
		 */
		move	k0, ra
		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		move	ra, k0
		li	k0, 3
		mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif
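
/*
 * For illustration only (callers live elsewhere, and the register
 * choice here is hypothetical): the context-switch path is expected
 * to publish the new thread's kernel stack with something like
 *
 *	PTR_ADDU	t0, $28, _THREAD_SIZE - 32
 *	set_saved_sp	t0, t1, t2
 *
 * so that the next exception taken from user mode picks it up via
 * get_saved_sp.
 */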

		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k1, sp
#ifdef CONFIG_EVA
		/*
		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
		 * EntryHi. Toggling Config7.RPS is slower and less portable.
		 *
		 * The RPS isn't automatically flushed when exceptions are
		 * taken, which can result in kernel mode speculative accesses
		 * to user addresses if the RPS mispredicts. That's harmless
		 * when user and kernel share the same address space, but with
		 * EVA the same user segments may be unmapped to kernel mode,
		 * even containing sensitive MMIO regions or invalid memory.
		 *
		 * This can happen when the kernel sets the return address to
		 * ret_from_* and jr's to the exception handler, which looks
		 * more like a tail call than a function call. If nested calls
		 * don't evict the last user address in the RPS, it will
		 * mispredict the return and fetch from a user-controlled
		 * address into the icache.
		 *
		 * More recent EVA-capable cores with MAAR to restrict
		 * speculative accesses aren't affected.
		 */
		MFC0	k0, CP0_ENTRYHI
		MTC0	k0, CP0_ENTRYHI
#endif
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
#else
		.set	at=k0
8:		PTR_SUBU k1, PT_SIZE
		.set	noat
		move	k0, sp
		move	sp, k1
#endif
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly.
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
		LONG_S	v1, PT_STATUS(sp)
		LONG_S	$4, PT_R4(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_CAUSE(sp)
		LONG_S	$6, PT_R6(sp)
		MFC0	v1, CP0_EPC
		LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	v1, PT_EPC(sp)
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set	mips64
		pref	0, 0($28)	/* Prefetch the current pointer */
#endif
		.set	pop
		.endm
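
/*
 * Note: SAVE_SOME relies on the convention (see CLI/STI/KMODE below)
 * that c0_status.CU0 is set for as long as we run on a kernel stack.
 * "sll k0, 3" moves CU0 (bit 28) into the sign bit, so "bltz k0, 8f"
 * branches when the exception was taken from kernel mode and the
 * current sp can be kept.
 */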

		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm

		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm

		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm
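
/*
 * SAVE_STATIC/RESTORE_STATIC cover the callee-saved registers, $16-$23
 * (s0-s7) and $30 (fp/s8), which ordinary exception entry can skip:
 * the compiled C handlers preserve them anyway. They only need to be
 * spilled on paths that may switch tasks or inspect the full register
 * file.
 */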

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, 0xff00
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		 rfe
		.set	pop
		.endm

#else
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	arch=r4000
		eret
		.set	mips0
		.endm
#endif
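
/*
 * In both RESTORE_SOME variants the 0xff00 mask splits c0_status in
 * two: the live interrupt-mask bits (IM0..IM7) are kept from the
 * current register, while all other bits come from the PT_STATUS word
 * saved at exception entry, so interrupt-mask changes made while the
 * frame was live survive the restore.
 */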

		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm
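
/*
 * A minimal usage sketch (the handler and C function names here are
 * hypothetical, not part of this file): a typical exception path saves
 * the frame, switches to kernel mode, calls a C handler and returns,
 * roughly:
 *
 *	NESTED(handle_foo, PT_SIZE, sp)
 *		SAVE_ALL
 *		CLI
 *		move	a0, sp		# struct pt_regs * for C code
 *		jal	do_foo
 *		j	ret_from_exception
 *	END(handle_foo)
 *
 * with RESTORE_ALL_AND_RET run once the kernel is done with the frame.
 */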

/*
 * Move to kernel mode and disable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack.
 */
		.macro	CLI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm
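
/*
 * The or/xori pair above first forces CU0 and all STATMASK bits to 1,
 * then flips the STATMASK bits back to 0: the net effect is CU0 = 1
 * (our "on kernel stack" marker) with KSU/ERL/EXL/IE (or the R3000
 * KU/IE stack) all cleared, i.e. kernel mode with interrupts off.
 */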

/*
 * Move to kernel mode and enable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack.
 */
		.macro	STI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_enable_hazard
		.endm
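
/*
 * Same trick as CLI, except that the xori mask (STATMASK & ~1)
 * excludes bit 0 (IE, or IEc on the R3000), so that bit stays set and
 * interrupts end up enabled while the other mode bits are cleared.
 */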

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set cp0 enable bit as sign that we're running on the kernel stack.
 */
		.macro	KMODE
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */