/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT
/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
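	/*
	 * Cause.ExcCode sits in bits 6:2, so the andi above already yields
	 * ExcCode * 4; exception_handlers[] holds one pointer per code, and
	 * the extra shift on 64-bit accounts for 8-byte pointers.
	 */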
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
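	/*
	 * With TagLo cleared, Index_Store_Tag_D invalidates the primary
	 * D-cache line selected by the bad address; Hit_Writeback_Inv_SD then
	 * writes back and invalidates the matching secondary-cache line.
	 */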
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)
	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
.set push
.set noreorder
/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
nop
nop
nop
#ifdef CONFIG_CPU_MICROMIPS
nop
nop
nop
nop
#endif
.set MIPS_ISA_ARCH_LEVEL_RAW
wait
/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	 nop
.set pop
	END(__r4k_wait)
.macro BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
.set push
.set noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
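	/*
	 * The ori/xori pair rounds EPC down to its 32-byte-aligned block, so
	 * any address inside __r4k_wait's rollback region compares equal to
	 * __r4k_wait itself; in that case EPC is rewound and the wait
	 * sequence is re-entered from the top (re-checking TIF_NEED_RESCHED).
	 */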
	bne	k0, k1, \handler
	 MTC0	k0, CP0_EPC
.set pop
.endm
.align 5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp
/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
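	/*
	 * k1 now holds the upper bits of &irq_stack (a single %hi on 32-bit,
	 * %highest/%higher/%hi pieces on 64-bit); scale the CPU id into a
	 * pointer-sized offset and finish the address with %lo in the load.
	 */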
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f
/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD sp, t0, t1
/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch
/* Restore sp */
	move	sp, s1
	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	 nop
#endif
	END(handle_int)

	__INIT
/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)
/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	 nop
#endif
	END(except_vec_ejtag_debug)
	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
.set push
.set noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
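	/*
	 * The lui/ori pair above is rewritten at run time so that v0 carries
	 * the address that except_vec_vi_handler (the common code below)
	 * will invoke via jalr.
	 */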
.set pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp
/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f
/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD sp, t0, t1
/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0
/* Restore sp */
	move	sp, s1
	j	ret_from_irq
	END(except_vec_vi_handler)
/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return
#ifdef CONFIG_SMP
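	/*
	 * Only CP0_DESAVE is available for k0, so k1 has to be staged through
	 * memory: take a small ll/sc lock, park k1 in the shared
	 * ejtag_debug_buffer, then copy it into this CPU's per-CPU slot.
	 */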
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
#ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
#endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
#ifdef CONFIG_SMP
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif
ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
.set mips32
deret
.set pop
	END(ejtag_debug_handler)
/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT
/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	 nop
#endif
	END(except_vec_nmi)
	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
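	/*
	 * Status.EXL is set first so that the CPU stays at exception level
	 * (kernel mode, interrupts masked) once ERL is cleared.
	 */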
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
.set pop
	END(nmi_handler)
.macro __build_clear_none
.endm
.macro __build_clear_sti
	TRACE_IRQS_ON
	STI
.endm
.macro __build_clear_cli
CLI
	TRACE_IRQS_OFF
.endm
.macro __build_clear_fpe
.set push
/* gas fails to assemble cfc1 for some archs (octeon).*/ \
.set mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
.set pop
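	/*
	 * FCSR.Cause is deliberately left set here and interrupts stay
	 * disabled; do_fpe() clears the Cause bits and enables interrupts
	 * after notify_die() has run.
	 */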
CLI
	TRACE_IRQS_OFF
.endm
.macro __build_clear_msa_fpe
	_cfcmsa	a1, MSA_CSR
CLI
	TRACE_IRQS_OFF
.endm
.macro __build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
KMODE
.endm
.macro __BUILD_silent exception
.endm
	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...  */
.macro __BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
.endm
.macro __BUILD_count exception
	LONG_L	t0,exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0,exception_count_\exception
	.comm	exception_count_\exception, 8, 8
.endm
	.macro	__BUILD_HANDLER exception handler clear verbose ext
.align 5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
.endm
	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
.endm
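	/*
	 * Each BUILD_HANDLER line below expands to a handle_<exception> entry
	 * point that saves the registers, runs __build_clear_<clear>, then
	 * calls do_<handler>() with a0 = sp and returns via ret_from_exception.
	 */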
	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */
.align 5
LEAF(handle_ri_rdhwr_tlbp)
.set push
.set noat
.set noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
.set pop
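	/*
	 * tlbp sets the P bit of c0_index when nothing matches, leaving it
	 * negative; without a TLB entry for EPC the instruction fetch in
	 * handle_ri_rdhwr could itself fault while EXL is still set, so take
	 * the slow path instead.
	 */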
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)

LEAF(handle_ri_rdhwr)
.set push
.set noat
.set noreorder
/* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
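	/*
	 * Bit 0 of EPC set means the faulting instruction is microMIPS, so it
	 * may only be halfword aligned: clear the ISA bit, fetch the two
	 * halfwords and merge them before comparing against the encodings
	 * listed above.
	 */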
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
/* The insn is rdhwr. No need to check CAUSE.BD here. */
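	/*
	 * rdhwr v1, $29 requests the UserLocal (TLS pointer) hardware
	 * register: emulate it by handing back the thread's saved TP value
	 * in v1 and stepping EPC past the instruction.
	 */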
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
.set push
.set arch=r4000
eret
.set pop
#endif
.set pop
	END(handle_ri_rdhwr)
#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */
	__INIT
	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif