/*
 *  linux/arch/x86_64/mcount_64.S
 *
 *  Copyright (C) 2014  Steven Rostedt, Red Hat Inc
 */
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
	.code64
	.section .entry.text, "ax"

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CC_USING_FENTRY
# define function_hook	__fentry__
#else
# define function_hook	mcount
#endif
/*
 * gcc -pg option adds a call to 'mcount' in most functions.
 * When -mfentry is used, the call is to 'fentry' and not 'mcount'
 * and is done before the function's stack frame is set up.
 * They both require a set of regs to be saved before calling
 * any C code and restored before returning back to the function.
 *
 * On boot up, all these calls are converted into nops. When tracing
 * is enabled, the call can jump to either ftrace_caller or
 * ftrace_regs_caller. Callbacks (tracing functions) that require
 * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
 * them. For this reason, the size of the pt_regs structure will be
 * allocated on the stack and the required mcount registers will
 * be saved in the locations that pt_regs has them in.
 */
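
/*
 * The C callbacks invoked from here have the ftrace_func_t signature:
 *
 *   void callback(unsigned long ip, unsigned long parent_ip,
 *                 struct ftrace_ops *op, struct pt_regs *regs);
 *
 * so the trampolines below load %rdi, %rsi, %rdx and %rcx accordingly.
 */
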
/* skip is set if the stack was already partially adjusted */
.macro save_mcount_regs skip=0
	/*
	 * We add enough stack to save all regs.
	 */
	subq $(SS+8-\skip), %rsp
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
	/* Move RIP to its proper location */
	movq SS+8(%rsp), %rdi
	movq %rdi, RIP(%rsp)
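	/*
	 * %rdi is deliberately left holding this value:
	 * ftrace_caller_setup uses it as the first (ip) argument
	 * without reloading it.
	 */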
.endm
.macro restore_mcount_regs skip=0
	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax
	addq $(SS+8-\skip), %rsp
.endm
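
/*
 * ftrace_caller_setup saves the mcount regs and loads the first three
 * arguments for the callback: ip in %rdi, parent_ip in %rsi and the
 * ftrace_ops pointer in %rdx. The trace_label argument names the
 * instruction that loads function_trace_op, so that a dynamically
 * allocated trampoline (a copy of this code) can patch in a pointer
 * to its own ftrace_ops instead.
 */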
/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup trace_label skip=0
	save_mcount_regs \skip
	/* Save this location */
GLOBAL(\trace_label)
	/* Load the ftrace_ops into the 3rd parameter */
	movq function_trace_op(%rip), %rdx
	/* %rdi already has %rip from the save_mcount_regs macro */
	subq $MCOUNT_INSN_SIZE, %rdi
	/* Load the parent_ip into the second parameter */
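	/*
	 * With fentry there is no stack frame yet, so the caller's return
	 * address (the parent ip) is still at SS+16(%rsp). With mcount the
	 * traced function's frame is already set up, so it is at 8(%rbp).
	 */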
#ifdef CC_USING_FENTRY
	movq SS+16(%rsp), %rsi
#else
	movq 8(%rbp), %rsi
#endif
.endm
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(function_hook)
	retq
END(function_hook)
#ifdef CONFIG_FRAME_POINTER
/*
 * Stack traces will stop at the ftrace trampoline if the frame pointer
 * is not set up properly. If fentry is used, we need to save a frame
 * pointer for the parent as well as the function traced, because the
 * fentry is called before the stack frame is set up, whereas mcount
 * is called afterward.
 */
.macro create_frame parent rip
#ifdef CC_USING_FENTRY
	pushq \parent
	pushq %rbp
	movq %rsp, %rbp
#endif
	pushq \rip
	pushq %rbp
	movq %rsp, %rbp
.endm

.macro restore_frame
#ifdef CC_USING_FENTRY
	addq $16, %rsp
#endif
	popq %rbp
	addq $8, %rsp
.endm

#else

.macro create_frame parent rip
.endm

.macro restore_frame
.endm

#endif /* CONFIG_FRAME_POINTER */
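
/*
 * ftrace_caller is used for callbacks that did not ask for pt_regs;
 * the regs argument is simply passed as NULL. Everything between
 * ftrace_caller and ftrace_caller_end is what gets copied when a
 * dynamically allocated trampoline is created for such an ftrace_ops.
 */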
ENTRY(ftrace_caller)
	ftrace_caller_setup ftrace_caller_op_ptr
	/* regs go into 4th parameter (but make it NULL) */
	movq $0, %rcx
	create_frame %rsi, %rdi
GLOBAL(ftrace_call)
	call ftrace_stub
	restore_frame
	restore_mcount_regs
	/*
	 * The copied trampoline must call ftrace_return as it
	 * still may need to call the function graph tracer.
	 */
GLOBAL(ftrace_caller_end)

GLOBAL(ftrace_return)
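	/* ftrace_regs_caller also ends with a jump to ftrace_return, so the graph hook below covers both trampolines */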
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
	jmp ftrace_stub
#endif

GLOBAL(ftrace_stub)
	retq
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	/* Save the current flags before compare (in SS location) */
	pushfq

	/* skip=8 to skip flags saved in SS */
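	/*
	 * pushfq stored the flags in what becomes the SS slot of pt_regs;
	 * passing skip=8 keeps the rest of the frame lined up with pt_regs,
	 * and the flags are copied into EFLAGS(%rsp) below.
	 */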
	ftrace_caller_setup ftrace_regs_caller_op_ptr 8
	/* Save the rest of pt_regs */
	movq %r15, R15(%rsp)
	movq %r14, R14(%rsp)
	movq %r13, R13(%rsp)
	movq %r12, R12(%rsp)
	movq %r11, R11(%rsp)
	movq %r10, R10(%rsp)
	movq %rbp, RBP(%rsp)
	movq %rbx, RBX(%rsp)

	/* Copy saved flags */
	movq SS(%rsp), %rcx
	movq %rcx, EFLAGS(%rsp)

	/* Kernel segments */
	movq $__KERNEL_DS, %rcx
	movq %rcx, SS(%rsp)
	movq $__KERNEL_CS, %rcx
	movq %rcx, CS(%rsp)

	/* Stack - skipping return address */
	leaq SS+16(%rsp), %rcx
	movq %rcx, RSP(%rsp)

	/* regs go into 4th parameter */
	leaq (%rsp), %rcx
	create_frame %rsi, %rdi
GLOBAL(ftrace_regs_call)
	call ftrace_stub
	restore_frame
	/* Copy flags back to SS, to restore them */
	movq EFLAGS(%rsp), %rax
	movq %rax, SS(%rsp)

	/* Handlers can change the RIP */
	movq RIP(%rsp), %rax
	movq %rax, SS+8(%rsp)
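	/*
	 * The (possibly updated) RIP now overwrites the saved return address,
	 * so the final retq reached through ftrace_return lands on it.
	 */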
	/* restore the rest of pt_regs */
	movq R15(%rsp), %r15
	movq R14(%rsp), %r14
	movq R13(%rsp), %r13
	movq R12(%rsp), %r12
	movq R10(%rsp), %r10
	movq RBP(%rsp), %rbp
	movq RBX(%rsp), %rbx

	/* skip=8 to skip flags saved in SS */
	restore_mcount_regs 8
	/* Restore flags */
	popfq
	/*
	 * As this jmp to ftrace_return can be a short jump
	 * it must not be copied into the trampoline.
	 * The trampoline will add the code to jump
	 * to the return.
	 */
GLOBAL(ftrace_regs_caller_end)
	jmp ftrace_return
	popfq
	jmp ftrace_stub

END(ftrace_regs_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
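
/*
 * Without DYNAMIC_FTRACE every mcount/fentry call site always enters
 * function_hook, which tests the global function pointers at run time
 * to decide whether anything needs to be traced.
 */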
ENTRY(function_hook)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
	retq

trace:
	ftrace_caller_setup ftrace_caller_op_ptr
	call *ftrace_trace_function
	restore_mcount_regs
	jmp fgraph_trace
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	save_mcount_regs
#ifdef CC_USING_FENTRY
	leaq SS+16(%rsp), %rdi
	movq $0, %rdx	/* No framepointers needed */
#else
	leaq 8(%rbp), %rdi
	movq (%rbp), %rdx
#endif
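
	/*
	 * prepare_ftrace_return(&parent_ret_addr, self_ip, frame_pointer)
	 * replaces the traced function's return address on its stack with
	 * return_to_handler so the exit can be traced as well.
	 */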
	movq RIP(%rsp), %rsi
	subq $MCOUNT_INSN_SIZE, %rsi

	call prepare_ftrace_return
	restore_mcount_regs
	retq
END(ftrace_graph_caller)
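
/*
 * The function graph tracer redirected the traced function's return to
 * here. Save the return value registers, ask ftrace_return_to_handler
 * for the original return address, then jump to it.
 */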
GLOBAL(return_to_handler)
	subq $24, %rsp

	/* Save the return values */
	movq %rax, (%rsp)
	movq %rdx, 8(%rsp)
	movq %rbp, %rdi

	call ftrace_return_to_handler

	movq %rax, %rdi
	movq 8(%rsp), %rdx
	movq (%rsp), %rax
	addq $24, %rsp
	jmp *%rdi
#endif