/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard-Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"

/* Select an alternative, minimalistic output than the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1
/* Runtime-togglable options exposed by this tracer (see trace_options) */
static struct tracer_opt kmem_opts[] = {
	/* Default disable the minimalistic output */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};
/* Current option state: no flags set by default (full-form output) */
static struct tracer_flags kmem_tracer_flags = {
	.val			= 0,
	.opts			= kmem_opts
};

/* Trace array this tracer writes into; assigned in kmem_trace_init() */
static struct trace_array *kmemtrace_array;
2009-03-23 16:12:24 +03:00
/* Trace allocations */
static inline void kmemtrace_alloc ( enum kmemtrace_type_id type_id ,
unsigned long call_site ,
const void * ptr ,
size_t bytes_req ,
size_t bytes_alloc ,
gfp_t gfp_flags ,
int node )
{
2009-03-31 09:48:49 +04:00
struct ftrace_event_call * call = & event_kmem_alloc ;
2009-03-23 16:12:24 +03:00
struct trace_array * tr = kmemtrace_array ;
2009-03-23 18:14:13 +03:00
struct kmemtrace_alloc_entry * entry ;
struct ring_buffer_event * event ;
2009-03-23 16:12:24 +03:00
event = ring_buffer_lock_reserve ( tr - > buffer , sizeof ( * entry ) ) ;
if ( ! event )
return ;
2009-03-23 18:14:13 +03:00
entry = ring_buffer_event_data ( event ) ;
2009-03-23 16:12:24 +03:00
tracing_generic_entry_update ( & entry - > ent , 0 , 0 ) ;
2009-03-23 18:14:13 +03:00
entry - > ent . type = TRACE_KMEM_ALLOC ;
entry - > type_id = type_id ;
entry - > call_site = call_site ;
entry - > ptr = ptr ;
entry - > bytes_req = bytes_req ;
entry - > bytes_alloc = bytes_alloc ;
entry - > gfp_flags = gfp_flags ;
entry - > node = node ;
2009-03-23 16:12:24 +03:00
2009-04-08 12:15:54 +04:00
if ( ! filter_check_discard ( call , entry , tr - > buffer , event ) )
ring_buffer_unlock_commit ( tr - > buffer , event ) ;
2009-03-23 16:12:24 +03:00
trace_wake_up ( ) ;
}
static inline void kmemtrace_free ( enum kmemtrace_type_id type_id ,
unsigned long call_site ,
const void * ptr )
{
2009-03-31 09:48:49 +04:00
struct ftrace_event_call * call = & event_kmem_free ;
2009-03-23 16:12:24 +03:00
struct trace_array * tr = kmemtrace_array ;
2009-03-23 18:14:13 +03:00
struct kmemtrace_free_entry * entry ;
struct ring_buffer_event * event ;
2009-03-23 16:12:24 +03:00
event = ring_buffer_lock_reserve ( tr - > buffer , sizeof ( * entry ) ) ;
if ( ! event )
return ;
entry = ring_buffer_event_data ( event ) ;
tracing_generic_entry_update ( & entry - > ent , 0 , 0 ) ;
2009-03-23 18:14:13 +03:00
entry - > ent . type = TRACE_KMEM_FREE ;
entry - > type_id = type_id ;
entry - > call_site = call_site ;
entry - > ptr = ptr ;
2009-03-23 16:12:24 +03:00
2009-04-08 12:15:54 +04:00
if ( ! filter_check_discard ( call , entry , tr - > buffer , event ) )
ring_buffer_unlock_commit ( tr - > buffer , event ) ;
2009-03-23 16:12:24 +03:00
trace_wake_up ( ) ;
}
/* Probe for the kmalloc tracepoint; no NUMA node information (-1). */
static void kmemtrace_kmalloc(unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}
/* Probe for the kmem_cache_alloc tracepoint; no NUMA node info (-1). */
static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}
/* Probe for the kmalloc_node tracepoint; records the requested node. */
static void kmemtrace_kmalloc_node(unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}
/* Probe for the kmem_cache_alloc_node tracepoint; records the node. */
static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}
/* Probe for the kfree tracepoint. */
static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}
/* Probe for the kmem_cache_free tracepoint. */
static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
/*
 * Attach all kmemtrace probes to their tracepoints.
 *
 * Returns 0 on success or the first registration error. On failure,
 * every registration that already succeeded is unwound, so no probe is
 * left attached (the original code returned with earlier probes still
 * registered, leaking them).
 */
static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	if (err)
		goto out_kmalloc;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
	if (err)
		goto out_cache_alloc;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	if (err)
		goto out_kmalloc_node;
	err = register_trace_kfree(kmemtrace_kfree);
	if (err)
		goto out_cache_alloc_node;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
	if (err)
		goto out_kfree;

	return 0;

	/* Unwind in reverse registration order */
out_kfree:
	unregister_trace_kfree(kmemtrace_kfree);
out_cache_alloc_node:
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
out_kmalloc_node:
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
out_cache_alloc:
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
out_kmalloc:
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	return err;
}
/* Detach all kmemtrace probes from their tracepoints. */
static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}
2008-12-30 00:42:23 +03:00
static int kmem_trace_init ( struct trace_array * tr )
{
kmemtrace_array = tr ;
2009-09-04 20:12:39 +04:00
tracing_reset_online_cpus ( tr ) ;
2008-12-30 00:42:23 +03:00
2009-03-23 16:12:24 +03:00
kmemtrace_start_probes ( ) ;
2008-12-30 00:42:23 +03:00
return 0 ;
}
/* Tracer teardown callback: stop feeding events into the ring buffer. */
static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}
/* Print the column headers used by the minimalistic (compressed) output. */
static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
			"      POINTER         NODE    CALLER\n");
	seq_printf(s, "# FREE   |      |     |       |       "
			"       |   |            |        |\n");
	seq_printf(s, "# |\n\n");
}
/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1
/* Common header of every record in the binary (user-visible) format */
struct kmemtrace_user_event {
	u8			event_id;	/* KMEMTRACE_USER_ALLOC or _FREE */
	u8			type_id;	/* enum kmemtrace_type_id value */
	u16			event_size;	/* total record size incl. payload */
	u32			cpu;		/* CPU the event originated on */
	u64			timestamp;
	unsigned long		call_site;
	unsigned long		ptr;
};
/* Extra payload appended to a kmemtrace_user_event for allocations */
struct kmemtrace_user_event_alloc {
	size_t			bytes_req;
	size_t			bytes_alloc;
	unsigned		gfp_flags;
	int			node;
};
static enum print_line_t
2009-07-06 12:15:04 +04:00
kmemtrace_print_alloc ( struct trace_iterator * iter , int flags )
2009-07-03 13:34:24 +04:00
{
struct trace_seq * s = & iter - > seq ;
struct kmemtrace_alloc_entry * entry ;
int ret ;
trace_assign_type ( entry , iter - > ent ) ;
ret = trace_seq_printf ( s , " type_id %d call_site %pF ptr %lu "
" bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d \n " ,
entry - > type_id , ( void * ) entry - > call_site , ( unsigned long ) entry - > ptr ,
( unsigned long ) entry - > bytes_req , ( unsigned long ) entry - > bytes_alloc ,
( unsigned long ) entry - > gfp_flags , entry - > node ) ;
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
return TRACE_TYPE_HANDLED ;
}
static enum print_line_t
2009-07-06 12:15:04 +04:00
kmemtrace_print_free ( struct trace_iterator * iter , int flags )
2008-12-30 00:42:23 +03:00
{
struct trace_seq * s = & iter - > seq ;
2009-07-03 13:34:24 +04:00
struct kmemtrace_free_entry * entry ;
int ret ;
trace_assign_type ( entry , iter - > ent ) ;
ret = trace_seq_printf ( s , " type_id %d call_site %pF ptr %lu \n " ,
entry - > type_id , ( void * ) entry - > call_site ,
( unsigned long ) entry - > ptr ) ;
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
return TRACE_TYPE_HANDLED ;
}
static enum print_line_t
2009-07-06 12:15:04 +04:00
kmemtrace_print_alloc_user ( struct trace_iterator * iter , int flags )
2009-07-03 13:34:24 +04:00
{
struct trace_seq * s = & iter - > seq ;
struct kmemtrace_alloc_entry * entry ;
2009-03-23 16:12:26 +03:00
struct kmemtrace_user_event * ev ;
2009-07-03 13:34:24 +04:00
struct kmemtrace_user_event_alloc * ev_alloc ;
trace_assign_type ( entry , iter - > ent ) ;
2008-12-30 00:42:23 +03:00
2009-03-23 16:12:26 +03:00
ev = trace_seq_reserve ( s , sizeof ( * ev ) ) ;
if ( ! ev )
return TRACE_TYPE_PARTIAL_LINE ;
2009-03-23 18:14:13 +03:00
ev - > event_id = KMEMTRACE_USER_ALLOC ;
ev - > type_id = entry - > type_id ;
ev - > event_size = sizeof ( * ev ) + sizeof ( * ev_alloc ) ;
ev - > cpu = iter - > cpu ;
ev - > timestamp = iter - > ts ;
ev - > call_site = entry - > call_site ;
ev - > ptr = ( unsigned long ) entry - > ptr ;
2009-03-23 16:12:26 +03:00
ev_alloc = trace_seq_reserve ( s , sizeof ( * ev_alloc ) ) ;
if ( ! ev_alloc )
2008-12-30 00:42:23 +03:00
return TRACE_TYPE_PARTIAL_LINE ;
2009-03-23 18:14:13 +03:00
ev_alloc - > bytes_req = entry - > bytes_req ;
ev_alloc - > bytes_alloc = entry - > bytes_alloc ;
ev_alloc - > gfp_flags = entry - > gfp_flags ;
ev_alloc - > node = entry - > node ;
2008-12-30 00:42:23 +03:00
return TRACE_TYPE_HANDLED ;
}
static enum print_line_t
2009-07-06 12:15:04 +04:00
kmemtrace_print_free_user ( struct trace_iterator * iter , int flags )
2008-12-30 00:42:23 +03:00
{
struct trace_seq * s = & iter - > seq ;
2009-07-03 13:34:24 +04:00
struct kmemtrace_free_entry * entry ;
2009-03-23 16:12:26 +03:00
struct kmemtrace_user_event * ev ;
2008-12-30 00:42:23 +03:00
2009-07-03 13:34:24 +04:00
trace_assign_type ( entry , iter - > ent ) ;
2009-03-23 16:12:26 +03:00
ev = trace_seq_reserve ( s , sizeof ( * ev ) ) ;
if ( ! ev )
2008-12-30 00:42:23 +03:00
return TRACE_TYPE_PARTIAL_LINE ;
2009-03-23 18:14:13 +03:00
ev - > event_id = KMEMTRACE_USER_FREE ;
ev - > type_id = entry - > type_id ;
ev - > event_size = sizeof ( * ev ) ;
ev - > cpu = iter - > cpu ;
ev - > timestamp = iter - > ts ;
ev - > call_site = entry - > call_site ;
ev - > ptr = ( unsigned long ) entry - > ptr ;
2008-12-30 00:42:23 +03:00
return TRACE_TYPE_HANDLED ;
}
/* The two following functions provide a more minimalistic output */
static enum print_line_t
2009-07-03 13:34:24 +04:00
kmemtrace_print_alloc_compress ( struct trace_iterator * iter )
2008-12-30 00:42:23 +03:00
{
2009-07-03 13:34:24 +04:00
struct kmemtrace_alloc_entry * entry ;
2008-12-30 00:42:23 +03:00
struct trace_seq * s = & iter - > seq ;
int ret ;
2009-07-03 13:34:24 +04:00
trace_assign_type ( entry , iter - > ent ) ;
2008-12-30 00:42:23 +03:00
/* Alloc entry */
ret = trace_seq_printf ( s , " + " ) ;
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
/* Type */
switch ( entry - > type_id ) {
case KMEMTRACE_TYPE_KMALLOC :
ret = trace_seq_printf ( s , " K " ) ;
break ;
case KMEMTRACE_TYPE_CACHE :
ret = trace_seq_printf ( s , " C " ) ;
break ;
case KMEMTRACE_TYPE_PAGES :
ret = trace_seq_printf ( s , " P " ) ;
break ;
default :
ret = trace_seq_printf ( s , " ? " ) ;
}
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
/* Requested */
2009-01-30 00:49:45 +03:00
ret = trace_seq_printf ( s , " %4zu " , entry - > bytes_req ) ;
2008-12-30 00:42:23 +03:00
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
/* Allocated */
2009-01-30 00:49:45 +03:00
ret = trace_seq_printf ( s , " %4zu " , entry - > bytes_alloc ) ;
2008-12-30 00:42:23 +03:00
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
/* Flags
* TODO : would be better to see the name of the GFP flag names
*/
ret = trace_seq_printf ( s , " %08x " , entry - > gfp_flags ) ;
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
/* Pointer to allocated */
ret = trace_seq_printf ( s , " 0x%tx " , ( ptrdiff_t ) entry - > ptr ) ;
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
2009-07-09 06:46:30 +04:00
/* Node and call site*/
ret = trace_seq_printf ( s , " %4d %pf \n " , entry - > node ,
( void * ) entry - > call_site ) ;
2008-12-30 00:42:23 +03:00
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
return TRACE_TYPE_HANDLED ;
}
static enum print_line_t
2009-07-03 13:34:24 +04:00
kmemtrace_print_free_compress ( struct trace_iterator * iter )
2008-12-30 00:42:23 +03:00
{
2009-07-03 13:34:24 +04:00
struct kmemtrace_free_entry * entry ;
2008-12-30 00:42:23 +03:00
struct trace_seq * s = & iter - > seq ;
int ret ;
2009-07-03 13:34:24 +04:00
trace_assign_type ( entry , iter - > ent ) ;
2008-12-30 00:42:23 +03:00
/* Free entry */
ret = trace_seq_printf ( s , " - " ) ;
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
/* Type */
switch ( entry - > type_id ) {
case KMEMTRACE_TYPE_KMALLOC :
ret = trace_seq_printf ( s , " K " ) ;
break ;
case KMEMTRACE_TYPE_CACHE :
ret = trace_seq_printf ( s , " C " ) ;
break ;
case KMEMTRACE_TYPE_PAGES :
ret = trace_seq_printf ( s , " P " ) ;
break ;
default :
ret = trace_seq_printf ( s , " ? " ) ;
}
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
/* Skip requested/allocated/flags */
ret = trace_seq_printf ( s , " " ) ;
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
/* Pointer to allocated */
ret = trace_seq_printf ( s , " 0x%tx " , ( ptrdiff_t ) entry - > ptr ) ;
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
2009-07-09 06:46:30 +04:00
/* Skip node and print call site*/
ret = trace_seq_printf ( s , " %pf \n " , ( void * ) entry - > call_site ) ;
2008-12-30 00:42:23 +03:00
if ( ! ret )
return TRACE_TYPE_PARTIAL_LINE ;
return TRACE_TYPE_HANDLED ;
}
static enum print_line_t kmemtrace_print_line ( struct trace_iterator * iter )
{
struct trace_entry * entry = iter - > ent ;
2009-07-03 13:34:24 +04:00
if ( ! ( kmem_tracer_flags . val & TRACE_KMEM_OPT_MINIMAL ) )
return TRACE_TYPE_UNHANDLED ;
2008-12-30 00:42:23 +03:00
2009-07-03 13:34:24 +04:00
switch ( entry - > type ) {
case TRACE_KMEM_ALLOC :
return kmemtrace_print_alloc_compress ( iter ) ;
case TRACE_KMEM_FREE :
return kmemtrace_print_free_compress ( iter ) ;
2008-12-30 00:42:23 +03:00
default :
return TRACE_TYPE_UNHANDLED ;
}
}
/* Text and binary output handlers for TRACE_KMEM_ALLOC records */
static struct trace_event kmem_trace_alloc = {
	.type	= TRACE_KMEM_ALLOC,
	.trace	= kmemtrace_print_alloc,
	.binary	= kmemtrace_print_alloc_user,
};
/* Text and binary output handlers for TRACE_KMEM_FREE records */
static struct trace_event kmem_trace_free = {
	.type	= TRACE_KMEM_FREE,
	.trace	= kmemtrace_print_free,
	.binary	= kmemtrace_print_free_user,
};
/* The kmemtrace tracer, registered with the ftrace core at init time */
static struct tracer kmem_tracer __read_mostly = {
	.name		= "kmemtrace",
	.init		= kmem_trace_init,
	.reset		= kmem_trace_reset,
	.print_line	= kmemtrace_print_line,
	.print_header	= kmemtrace_headers,
	.flags		= &kmem_tracer_flags
};
/*
 * Early-boot hook; currently a no-op stub. NOTE(review): presumably
 * called from core startup code outside this file — verify the caller.
 */
void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}
2008-12-30 00:42:23 +03:00
static int __init init_kmem_tracer ( void )
{
2009-07-03 13:34:24 +04:00
if ( ! register_ftrace_event ( & kmem_trace_alloc ) ) {
pr_warning ( " Warning: could not register kmem events \n " ) ;
return 1 ;
}
if ( ! register_ftrace_event ( & kmem_trace_free ) ) {
pr_warning ( " Warning: could not register kmem events \n " ) ;
return 1 ;
}
2009-09-28 11:55:40 +04:00
if ( register_tracer ( & kmem_tracer ) ! = 0 ) {
2009-07-03 13:34:24 +04:00
pr_warning ( " Warning: could not register the kmem tracer \n " ) ;
return 1 ;
}
return 0 ;
2008-12-30 00:42:23 +03:00
}
device_initcall ( init_kmem_tracer ) ;