/* (stray VCS timestamp removed during cleanup) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

#include <asm/hazards.h>

/*
 * Assembler macro that enables interrupts, used by the inline function
 * below.  Three variants:
 *  - SMTC: interrupts are masked per-thread via TCStatus.IXMT (the
 *    0x400 bit of CP0 $2, select 1) — clear it with the ori/xori pair.
 *  - MIPS R2: the dedicated "ei" instruction.
 *  - Classic: read Status (CP0 $12), then ori 0x1f / xori 0x1e leaves
 *    only the IE bit (bit 0) set in the low field and writes it back.
 * $1 (AT) is used as scratch, hence ".set noat"; irq_enable_hazard
 * (from <asm/hazards.h>) covers the CP0 write hazard.
 */
__asm__(
	"\t.macro\traw_local_irq_enable\n"
	"\t.set\tpush\n"
	"\t.set\treorder\n"
	"\t.set\tnoat\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"\tmfc0\t$1, $2, 1\t# SMTC - clear TCStatus.IXMT\n"
	"\tori\t$1, 0x400\n"
	"\txori\t$1, 0x400\n"
	"\tmtc0\t$1, $2, 1\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"\tei\n"
#else
	"\tmfc0\t$1,$12\n"
	"\tori\t$1,0x1f\n"
	"\txori\t$1,0x1e\n"
	"\tmtc0\t$1,$12\n"
#endif
	"\tirq_enable_hazard\n"
	"\t.set\tpop\n"
	"\t.endm");
/*
 * Enable interrupts on the current CPU (or, under SMTC, the current
 * thread).  Expands the raw_local_irq_enable assembler macro defined
 * above; the "memory" clobber makes this a compiler barrier.
 */
static inline void raw_local_irq_enable(void)
{
	__asm__ __volatile__(
		"raw_local_irq_enable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}
/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
/*
 * For TX49, operating only IE bit is not enough.
 *
 * If mfc0 $12 follows store and the mfc0 is last instruction of a
 * page and fetching the next instruction causes TLB miss, the result
 * of the mfc0 might wrongly contain EXL bit.
 *
 * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask EXL bit of the result or place a nop before mfc0.
 */
/*
 * Assembler macro that disables interrupts.
 *  - SMTC: set TCStatus.IXMT (0x400) for this thread.
 *  - MIPS R2: the dedicated "di" instruction.
 *  - Classic: ori 0x1f / xori 0x1f clears the low Status bits,
 *    including IE, before writing Status back (see the TX49 erratum
 *    comment above for why all low bits are handled).
 * ".set noreorder" around the mtc0 keeps the assembler from moving
 * instructions into the CP0 write shadow; irq_disable_hazard covers
 * the remaining hazard.
 */
__asm__(
	"\t.macro\traw_local_irq_disable\n"
	"\t.set\tpush\n"
	"\t.set\tnoat\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"\tmfc0\t$1, $2, 1\n"
	"\tori\t$1, 0x400\n"
	"\t.set\tnoreorder\n"
	"\tmtc0\t$1, $2, 1\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"\tdi\n"
#else
	"\tmfc0\t$1,$12\n"
	"\tori\t$1,0x1f\n"
	"\txori\t$1,0x1f\n"
	"\t.set\tnoreorder\n"
	"\tmtc0\t$1,$12\n"
#endif
	"\tirq_disable_hazard\n"
	"\t.set\tpop\n"
	"\t.endm\n");
/*
 * Disable interrupts on the current CPU (or, under SMTC, the current
 * thread).  Expands the raw_local_irq_disable assembler macro defined
 * above; the "memory" clobber makes this a compiler barrier.
 */
static inline void raw_local_irq_disable(void)
{
	__asm__ __volatile__(
		"raw_local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}
/*
 * Assembler macro that reads the current interrupt state into \flags:
 * TCStatus (CP0 $2, select 1) under SMTC, otherwise Status (CP0 $12).
 * Note: the parameter reference must be "\flags" with no intervening
 * space, or GAS will not substitute the macro argument.
 */
__asm__(
	"\t.macro\traw_local_save_flags flags\n"
	"\t.set\tpush\n"
	"\t.set\treorder\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"\tmfc0\t\\flags, $2, 1\n"
#else
	"\tmfc0\t\\flags, $12\n"
#endif
	"\t.set\tpop\n"
	"\t.endm\n");
/*
 * raw_local_save_flags(x) — store the current interrupt state in x.
 * Only reads CP0 state (no clobbers needed beyond the "=r" output).
 */
#define raw_local_save_flags(x)						\
__asm__ __volatile__(							\
	"raw_local_save_flags %0"					\
	: "=r" (x))
/*
 * Assembler macro that disables interrupts and leaves the previous
 * state in \result:
 *  - SMTC: read TCStatus, set IXMT via $1, write back, and mask
 *    \result down to the IXMT bit (0x400).
 *  - MIPS R2: "di \result" returns the old Status; keep only IE (bit 0).
 *  - Classic: save Status in \result, clear the low bits in a scratch
 *    copy ($1) and write that back.
 * Parameter references must be "\result" with no space after the
 * backslash, or GAS substitution fails.
 */
__asm__(
	"\t.macro\traw_local_irq_save result\n"
	"\t.set\tpush\n"
	"\t.set\treorder\n"
	"\t.set\tnoat\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"\tmfc0\t\\result, $2, 1\n"
	"\tori\t$1, \\result, 0x400\n"
	"\t.set\tnoreorder\n"
	"\tmtc0\t$1, $2, 1\n"
	"\tandi\t\\result, \\result, 0x400\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"\tdi\t\\result\n"
	"\tandi\t\\result, 1\n"
#else
	"\tmfc0\t\\result, $12\n"
	"\tori\t$1, \\result, 0x1f\n"
	"\txori\t$1, 0x1f\n"
	"\t.set\tnoreorder\n"
	"\tmtc0\t$1, $12\n"
#endif
	"\tirq_disable_hazard\n"
	"\t.set\tpop\n"
	"\t.endm\n");
/*
 * raw_local_irq_save(x) — disable interrupts, storing the previous
 * state in x for a later raw_local_irq_restore().  "memory" clobber
 * makes this a compiler barrier.
 */
#define raw_local_irq_save(x)						\
__asm__ __volatile__(							\
	"raw_local_irq_save\t%0"					\
	: "=r" (x)							\
	: /* no inputs */						\
	: "memory")
/*
 * Assembler macro that restores the interrupt state saved in \flags.
 *  - SMTC: merge the saved IXMT bit (0x400) into the current TCStatus.
 *  - R2 + CONFIG_IRQ_CPU: branch on the saved IE bit and execute
 *    di/ei (slow but race-free; see comment below).
 *  - R2 only: "ins" the saved IE bit directly into Status bit 0
 *    (fast but racy, as the original comment admits).
 *  - Classic: merge the saved IE bit into Status with the
 *    andi/ori/xori/or sequence.
 * \flags is clobbered in the non-"ins" variants.
 */
__asm__(
	"\t.macro\traw_local_irq_restore flags\n"
	"\t.set\tpush\n"
	"\t.set\tnoreorder\n"
	"\t.set\tnoat\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"\tmfc0\t$1, $2, 1\n"
	"\tandi\t\\flags, 0x400\n"
	"\tori\t$1, 0x400\n"
	"\txori\t$1, 0x400\n"
	"\tor\t\\flags, $1\n"
	"\tmtc0\t\\flags, $2, 1\n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we're having since days 1.
	 */
	"\tbeqz\t\\flags, 1f\n"
	"\tdi\n"
	"\tei\n"
	"1:\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"\tmfc0\t$1, $12\n"
	"\tins\t$1, \\flags, 0, 1\n"
	"\tmtc0\t$1, $12\n"
#else
	"\tmfc0\t$1, $12\n"
	"\tandi\t\\flags, 1\n"
	"\tori\t$1, 0x1f\n"
	"\txori\t$1, 0x1f\n"
	"\tor\t\\flags, $1\n"
	"\tmtc0\t\\flags, $12\n"
#endif
	"\tirq_disable_hazard\n"
	"\t.set\tpop\n"
	"\t.endm\n");
extern void smtc_ipi_replay(void);

/*
 * Restore the interrupt state previously saved by raw_local_irq_save().
 * flags is passed through a scratch register pair ("0" ties the input
 * to the output) because the assembler macro clobbers its argument in
 * most configurations.
 */
static inline void raw_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	/*
	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
	 * IPIs, at the cost of branch and call overhead on each
	 * local_irq_restore()
	 */
	if (unlikely(!(flags & 0x0400)))
		smtc_ipi_replay();
#endif

	__asm__ __volatile__(
		"raw_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}
/*
 * Test whether a saved flags value represents "interrupts disabled".
 * Under SMTC a set IXMT bit (0x400) means masked; otherwise a clear
 * IE bit (bit 0 of Status) means disabled.  Returns nonzero when
 * interrupts were disabled in @flags.
 */
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
	 */
	return flags & 0x400;
#else
	return !(flags & 1);
#endif
}
#endif /* #ifndef __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$11, PT_R11(sp);					\
	LONG_L	$10, PT_R10(sp);					\
	LONG_L	$9, PT_R9(sp);						\
	LONG_L	$8, PT_R8(sp);						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON						\
	CLI;	/* make sure trace_hardirqs_on() is called in kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD					\
	TRACE_IRQS_ON;						\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF						\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif

#endif /* _ASM_IRQFLAGS_H */