/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame, this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
.code64
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_irq_enable_syscall_ret)
	movq	%gs:pda_oldrsp,%rsp
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm
/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax, %eax
	pushq	%rax /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
	.macro CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 4
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -4
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,threadinfo_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we don't
 *	have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
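/*
 * Illustration (not part of the original file): a minimal user-space
 * sequence that sets up exactly the register layout documented above
 * before executing SYSCALL; "msg" and "len" are hypothetical symbols.
 *
 *	movq	$__NR_write,%rax	# system call number
 *	movq	$1,%rdi			# arg0: fd (stdout)
 *	leaq	msg(%rip),%rsi		# arg1: buffer
 *	movq	$len,%rdx		# arg2: count
 *	syscall				# rcx <- return RIP, r11 <- rflags
 *
 * On return %rax holds the result (or a -errno value); rcx and r11 are
 * clobbered as described above, all other registers are preserved.
 */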
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq	%rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	ENABLE_INTERRUPTS_SYSCALL_RET

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call
	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	movq $-ENOSYS,%rcx
	cmova %rcx,%rax
	ja  1f
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
1:	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl  $~TS_COMPAT,threadinfo_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls that need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq	-ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm
	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)
ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	    much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push	%rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif
	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous
	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz  retint_restore_args
	bt   $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm
	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	SWAPGS
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm
/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl threadinfo_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC
error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rbp
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	movl %ebp,%ebp	/* zero extend */
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
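/*
 * Illustration (not part of the original file): a sketch of a typical
 * in-kernel caller; my_thread_fn and my_arg are hypothetical names and
 * the CLONE_* flags are only an example:
 *
 *	pid = kernel_thread(my_thread_fn, my_arg, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * The flags passed in are ORed with kernel_thread_flags below before
 * do_fork() is called, and the return value is the new thread's pid or a
 * negative errno.
 */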
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * of hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all
 * state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
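/*
 * Illustration (not part of the original file): a sketch of an in-kernel
 * caller of kernel_execve(); the argv/envp arrays are hypothetical:
 *
 *	static char *argv[] = { "/sbin/init", NULL };
 *	static char *envp[] = { "HOME=/", "TERM=linux", NULL };
 *	int ret = kernel_execve(argv[0], argv, envp);
 *
 * On success the new user image is entered through int_ret_from_sys_call
 * below and the call does not return to the caller; on failure a negative
 * errno is returned.
 */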
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)
	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)
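The one-line stubs that follow lean entirely on the entry macros defined
earlier in this file.  As a rough, illustrative expansion only (hypothetical
macro name; the real macro differs in detail -- extra CFI annotations,
paravirt hooks, and so on): a zeroentry-style stub pushes a dummy error code
for vectors where the CPU does not supply one, so the stack layout matches
the error-code case, then hands off to the common error_entry path with the
C handler's address in %rax.  errorentry skips the dummy push because the
CPU already pushed a real error code, and paranoidentry builds a full frame
for handlers that may arrive on an IST exception stack.

	.macro zeroentry_sketch sym	/* sketch only, not the real macro */
	INTR_FRAME
	pushq $0			/* fake error code, uniform layout */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax			/* save %rax; common path fixes up the frame */
	CFI_ADJUST_CFA_OFFSET 8
	leaq \sym(%rip),%rax		/* C handler for error_entry to call */
	jmp error_entry			/* shared register save + dispatch */
	CFI_ENDPROC
	.endm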
2005-04-16 15:20:36 -07:00
ENTRY(overflow)
	zeroentry do_overflow
2006-06-26 13:56:55 +02:00
END(overflow)
2005-04-16 15:20:36 -07:00
ENTRY(bounds)
	zeroentry do_bounds
2006-06-26 13:56:55 +02:00
END(bounds)
2005-04-16 15:20:36 -07:00
ENTRY(invalid_op)
	zeroentry do_invalid_op
2006-06-26 13:56:55 +02:00
END(invalid_op)
2005-04-16 15:20:36 -07:00
ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
2006-06-26 13:56:55 +02:00
END(coprocessor_segment_overrun)
2005-04-16 15:20:36 -07:00
ENTRY(reserved)
	zeroentry do_reserved
2006-06-26 13:56:55 +02:00
END(reserved)
2005-04-16 15:20:36 -07:00
/* runs on exception stack */
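/*
 * (The handlers flagged "runs on exception stack" are entered through an
 * IST slot, so the CPU hands them a known-good stack even when the normal
 * kernel stack is unusable -- the double-fault case in particular -- which
 * is why they go through paranoidentry instead of the plain error path.)
 */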
ENTRY(double_fault)
2005-09-12 18:49:24 +02:00
	XCPT_FRAME
2005-04-16 15:20:36 -07:00
	paranoidentry do_double_fault
2006-07-03 00:24:45 -07:00
	jmp paranoid_exit1
2005-04-16 15:20:36 -07:00
	CFI_ENDPROC
2006-06-26 13:56:55 +02:00
END(double_fault)
2005-04-16 15:20:36 -07:00
ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
2006-06-26 13:56:55 +02:00
END(invalid_TSS)
2005-04-16 15:20:36 -07:00
ENTRY(segment_not_present)
	errorentry do_segment_not_present
2006-06-26 13:56:55 +02:00
END(segment_not_present)
2005-04-16 15:20:36 -07:00
/* runs on exception stack */
ENTRY(stack_segment)
2005-09-12 18:49:24 +02:00
	XCPT_FRAME
2005-04-16 15:20:36 -07:00
	paranoidentry do_stack_segment
2006-07-03 00:24:45 -07:00
	jmp paranoid_exit1
2005-04-16 15:20:36 -07:00
	CFI_ENDPROC
2006-06-26 13:56:55 +02:00
END(stack_segment)
2005-04-16 15:20:36 -07:00
2005-09-06 15:19:28 -07:00
KPROBE_ENTRY(general_protection)
2005-04-16 15:20:36 -07:00
	errorentry do_general_protection
2006-09-26 10:52:34 +02:00
KPROBE_END(general_protection)
2005-04-16 15:20:36 -07:00
ENTRY(alignment_check)
	errorentry do_alignment_check
2006-06-26 13:56:55 +02:00
END(alignment_check)
2005-04-16 15:20:36 -07:00
ENTRY(divide_error)
	zeroentry do_divide_error
2006-06-26 13:56:55 +02:00
END(divide_error)
2005-04-16 15:20:36 -07:00
ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
2006-06-26 13:56:55 +02:00
END(spurious_interrupt_bug)
2005-04-16 15:20:36 -07:00
#ifdef CONFIG_X86_MCE
/* runs on exception stack */
ENTRY(machine_check)
2005-09-12 18:49:24 +02:00
	INTR_FRAME
2005-04-16 15:20:36 -07:00
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
2006-07-03 00:24:45 -07:00
	jmp paranoid_exit1
2005-04-16 15:20:36 -07:00
	CFI_ENDPROC
2006-06-26 13:56:55 +02:00
END(machine_check)
2005-04-16 15:20:36 -07:00
#endif
2006-08-02 22:37:28 +02:00
/* Call softirq on interrupt stack. Interrupts are off. */
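/*
 * (Sketch of the mechanism for the code below: pda_irqcount is presumed
 * to sit at -1 while no interrupt-stack frame is active, so the incl
 * leaves ZF set only on the outermost entry; cmove then switches %rsp to
 * the per-CPU interrupt stack in exactly that case, while nested entries
 * keep running on whatever stack they are already on.)
 */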
2005-07-28 21:15:49 -07:00
ENTRY(call_softirq)
2005-09-12 18:49:24 +02:00
	CFI_STARTPROC
2006-08-02 22:37:28 +02:00
	push %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
2005-07-28 21:15:49 -07:00
	incl %gs:pda_irqcount
2006-08-02 22:37:28 +02:00
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
2005-07-28 21:15:49 -07:00
	call __do_softirq
2006-08-02 22:37:28 +02:00
	leaveq
2005-09-12 18:49:24 +02:00
	CFI_DEF_CFA_REGISTER rsp
2006-08-02 22:37:28 +02:00
	CFI_ADJUST_CFA_OFFSET -8
2005-07-28 21:15:49 -07:00
	decl %gs:pda_irqcount
	ret
2005-09-12 18:49:24 +02:00
	CFI_ENDPROC
2006-06-26 13:56:55 +02:00
ENDPROC(call_softirq)
2007-06-23 02:29:25 +02:00
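/*
 * (The ignore_sysret stub below just fails the call: it loads -ENOSYS as
 * the return value and sysrets straight back to user space.  Its exact
 * role -- presumably a do-nothing SYSCALL target for configurations that
 * never service that entry point -- is an assumption, not documented here.)
 */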
KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)