/*
 * Copyright (c) 2009, Christoph Hellwig
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xfs

#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XFS_H

#include <linux/tracepoint.h>

struct xfs_agf;
struct xfs_alloc_arg;
struct xfs_attr_list_context;
struct xfs_buf_log_item;
struct xfs_da_args;
struct xfs_da_node_entry;
struct xfs_dquot;
struct xfs_log_item;
struct xlog_ticket;
struct xlog;
struct xlog_recover;
struct xlog_recover_item;
struct xfs_buf_log_format;
struct xfs_inode_log_format;
struct xfs_bmbt_irec;
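/*
 * Most tracepoints below follow the standard DECLARE_EVENT_CLASS /
 * DEFINE_EVENT pattern from <linux/tracepoint.h>: the class supplies the
 * prototype, the __entry layout and the TP_printk format, and each
 * DEFINE_EVENT stamps out one named event sharing them.  An event named
 * "foo" is emitted from C code by calling trace_foo() with the TP_PROTO
 * arguments, e.g. (illustrative call site only, not part of this header):
 *
 *	trace_xfs_attr_list_sf(ctx);	// ctx: struct xfs_attr_list_context *
 */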
DECLARE_EVENT_CLASS(xfs_attr_list_class,
	TP_PROTO(struct xfs_attr_list_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, alist)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor->hashval;
		__entry->blkno = ctx->cursor->blkno;
		__entry->offset = ctx->cursor->offset;
		__entry->alist = ctx->alist;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->flags = ctx->flags;
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		  "alist 0x%p size %u count %u firstu %u flags %d %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->hashval,
		  __entry->blkno,
		  __entry->offset,
		  __entry->dupcnt,
		  __entry->alist,
		  __entry->bufsize,
		  __entry->count,
		  __entry->firstu,
		  __entry->flags,
		  __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS)
	)
)

#define DEFINE_ATTR_LIST_EVENT(name) \
DEFINE_EVENT(xfs_attr_list_class, name, \
	TP_PROTO(struct xfs_attr_list_context *ctx), \
	TP_ARGS(ctx))
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
DECLARE_EVENT_CLASS(xfs_perag_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount,
		 unsigned long caller_ip),
	TP_ARGS(mp, agno, refcount, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(int, refcount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->refcount = refcount;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d agno %u refcount %d caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->refcount,
		  (char *)__entry->caller_ip)
);

#define DEFINE_PERAG_REF_EVENT(name) \
DEFINE_EVENT(xfs_perag_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
		 unsigned long caller_ip), \
	TP_ARGS(mp, agno, refcount, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);
DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
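/*
 * The perag reference events take the current reference count and the
 * caller's return address so that reference leaks can be attributed from
 * the trace output.  A call site would look roughly like this
 * (illustrative sketch only, not part of this header):
 *
 *	trace_xfs_perag_get(mp, agno, refcount, _RET_IP_);
 */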
TRACE_EVENT(xfs_attr_list_node_descend,
	TP_PROTO(struct xfs_attr_list_context *ctx,
		 struct xfs_da_node_entry *btree),
	TP_ARGS(ctx, btree),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, alist)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(int, flags)
		__field(u32, bt_hashval)
		__field(u32, bt_before)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor->hashval;
		__entry->blkno = ctx->cursor->blkno;
		__entry->offset = ctx->cursor->offset;
		__entry->alist = ctx->alist;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->flags = ctx->flags;
		__entry->bt_hashval = be32_to_cpu(btree->hashval);
		__entry->bt_before = be32_to_cpu(btree->before);
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		  "alist 0x%p size %u count %u firstu %u flags %d %s "
		  "node hashval %u, node before %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->hashval,
		  __entry->blkno,
		  __entry->offset,
		  __entry->dupcnt,
		  __entry->alist,
		  __entry->bufsize,
		  __entry->count,
		  __entry->firstu,
		  __entry->flags,
		  __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
		  __entry->bt_hashval,
		  __entry->bt_before)
);
TRACE_EVENT(xfs_iext_insert,
	TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
		 struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
	TP_ARGS(ip, idx, r, state, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_extnum_t, idx)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_exntst_t, state)
		__field(int, bmap_state)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->idx = idx;
		__entry->startoff = r->br_startoff;
		__entry->startblock = r->br_startblock;
		__entry->blockcount = r->br_blockcount;
		__entry->state = r->br_state;
		__entry->bmap_state = state;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
		  "offset %lld block %lld count %lld flag %d caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
		  (long)__entry->idx,
		  __entry->startoff,
		  (__int64_t)__entry->startblock,
		  __entry->blockcount,
		  __entry->state,
		  (char *)__entry->caller_ip)
);
DECLARE_EVENT_CLASS(xfs_bmap_class,
	TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state,
		 unsigned long caller_ip),
	TP_ARGS(ip, idx, state, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_extnum_t, idx)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_exntst_t, state)
		__field(int, bmap_state)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		struct xfs_ifork	*ifp = (state & BMAP_ATTRFORK) ?
						ip->i_afp : &ip->i_df;
		struct xfs_bmbt_irec	r;

		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r);
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->idx = idx;
		__entry->startoff = r.br_startoff;
		__entry->startblock = r.br_startblock;
		__entry->blockcount = r.br_blockcount;
		__entry->state = r.br_state;
		__entry->bmap_state = state;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
		  "offset %lld block %lld count %lld flag %d caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
		  (long)__entry->idx,
		  __entry->startoff,
		  (__int64_t)__entry->startblock,
		  __entry->blockcount,
		  __entry->state,
		  (char *)__entry->caller_ip)
)

#define DEFINE_BMAP_EVENT(name) \
DEFINE_EVENT(xfs_bmap_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, idx, state, caller_ip))
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
DEFINE_BMAP_EVENT(xfs_extlist);
DECLARE_EVENT_CLASS(xfs_buf_class,
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
	TP_ARGS(bp, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(int, nblks)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = bp->b_bn;
		__entry->nblks = bp->b_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d bno 0x%llx nblks 0x%x hold %d pincount %d "
		  "lock %d flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->nblks,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
)

#define DEFINE_BUF_EVENT(name) \
DEFINE_EVENT(xfs_buf_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
	TP_ARGS(bp, caller_ip))
DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_iorequest);
DEFINE_BUF_EVENT(xfs_buf_bawrite);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_trylock);
DEFINE_BUF_EVENT(xfs_buf_unlock);
DEFINE_BUF_EVENT(xfs_buf_iowait);
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
DEFINE_BUF_EVENT(xfs_buf_delwri_queued);
DEFINE_BUF_EVENT(xfs_buf_delwri_split);
DEFINE_BUF_EVENT(xfs_buf_get_uncached);
DEFINE_BUF_EVENT(xfs_bdstrat_shut);
DEFINE_BUF_EVENT(xfs_buf_item_relse);
DEFINE_BUF_EVENT(xfs_buf_item_iodone);
DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
DEFINE_BUF_EVENT(xfs_buf_error_relse);
DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);

/* not really buffer traces, but the buf provides useful information */
DEFINE_BUF_EVENT(xfs_btree_corrupt);
DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
DEFINE_BUF_EVENT(xfs_reset_dqcounts);
DEFINE_BUF_EVENT(xfs_inode_item_push);

/* pass flags explicitly */
DECLARE_EVENT_CLASS(xfs_buf_flags_class,
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
	TP_ARGS(bp, flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(size_t, buffer_length)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = bp->b_bn;
		__entry->buffer_length = BBTOB(bp->b_length);
		__entry->flags = flags;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		  "lock %d flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->buffer_length,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
)

#define DEFINE_BUF_FLAGS_EVENT(name) \
DEFINE_EVENT(xfs_buf_flags_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
	TP_ARGS(bp, flags, caller_ip))
DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
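/*
 * Unlike the plain buffer events above, the flags-carrying events record
 * the flags value passed in by the caller rather than bp->b_flags, so the
 * lookup/read flags are visible even before they are applied to the
 * buffer.  Roughly (illustrative call only, not part of this header):
 *
 *	trace_xfs_buf_read(bp, flags, _RET_IP_);
 */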
TRACE_EVENT(xfs_buf_ioerror,
	TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
	TP_ARGS(bp, error, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(size_t, buffer_length)
		__field(unsigned, flags)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(int, error)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = bp->b_bn;
		__entry->buffer_length = BBTOB(bp->b_length);
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->error = error;
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		  "lock %d error %d flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->buffer_length,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __entry->error,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
);
DECLARE_EVENT_CLASS(xfs_buf_item_class,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, buf_bno)
		__field(size_t, buf_len)
		__field(int, buf_hold)
		__field(int, buf_pincount)
		__field(int, buf_lockval)
		__field(unsigned, buf_flags)
		__field(unsigned, bli_recur)
		__field(int, bli_refcount)
		__field(unsigned, bli_flags)
		__field(void *, li_desc)
		__field(unsigned, li_flags)
	),
	TP_fast_assign(
		__entry->dev = bip->bli_buf->b_target->bt_dev;
		__entry->bli_flags = bip->bli_flags;
		__entry->bli_recur = bip->bli_recur;
		__entry->bli_refcount = atomic_read(&bip->bli_refcount);
		__entry->buf_bno = bip->bli_buf->b_bn;
		__entry->buf_len = BBTOB(bip->bli_buf->b_length);
		__entry->buf_flags = bip->bli_buf->b_flags;
		__entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
		__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
		__entry->buf_lockval = bip->bli_buf->b_sema.count;
		__entry->li_desc = bip->bli_item.li_desc;
		__entry->li_flags = bip->bli_item.li_flags;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		  "lock %d flags %s recur %d refcount %d bliflags %s "
		  "lidesc 0x%p liflags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->buf_bno,
		  __entry->buf_len,
		  __entry->buf_hold,
		  __entry->buf_pincount,
		  __entry->buf_lockval,
		  __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
		  __entry->bli_recur,
		  __entry->bli_refcount,
		  __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
		  __entry->li_desc,
		  __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
)

#define DEFINE_BUF_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_buf_item_class, name, \
	TP_PROTO(struct xfs_buf_log_item *bip), \
	TP_ARGS(bip))
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
DECLARE_EVENT_CLASS(xfs_lock_class,
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
		 unsigned long caller_ip),
	TP_ARGS(ip, lock_flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, lock_flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->lock_flags = lock_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
		  (void *)__entry->caller_ip)
)

#define DEFINE_LOCK_EVENT(name) \
DEFINE_EVENT(xfs_lock_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, lock_flags, caller_ip))
DEFINE_LOCK_EVENT(xfs_ilock);
DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);
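/*
 * The inode lock events record the XFS_*LOCK_* flags and the caller's
 * return address; a lock acquisition would be traced roughly as follows
 * (illustrative only, not part of this header):
 *
 *	trace_xfs_ilock(ip, XFS_ILOCK_EXCL, _RET_IP_);
 */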
DECLARE_EVENT_CLASS(xfs_inode_class,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
	),
	TP_printk("dev %d:%d ino 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino)
)

#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
	TP_PROTO(struct xfs_inode *ip), \
	TP_ARGS(ip))
DEFINE_INODE_EVENT(xfs_iget_skip);
DEFINE_INODE_EVENT(xfs_iget_reclaim);
DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
DEFINE_INODE_EVENT(xfs_iget_hit);
DEFINE_INODE_EVENT(xfs_iget_miss);

DEFINE_INODE_EVENT(xfs_getattr);
DEFINE_INODE_EVENT(xfs_setattr);
DEFINE_INODE_EVENT(xfs_readlink);
DEFINE_INODE_EVENT(xfs_alloc_file_space);
DEFINE_INODE_EVENT(xfs_free_file_space);
DEFINE_INODE_EVENT(xfs_readdir);
#ifdef CONFIG_XFS_POSIX_ACL
DEFINE_INODE_EVENT(xfs_get_acl);
#endif
DEFINE_INODE_EVENT(xfs_vm_bmap);
DEFINE_INODE_EVENT(xfs_file_ioctl);
DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
DEFINE_INODE_EVENT(xfs_ioctl_setattr);
DEFINE_INODE_EVENT(xfs_dir_fsync);
DEFINE_INODE_EVENT(xfs_file_fsync);
DEFINE_INODE_EVENT(xfs_destroy_inode);
DEFINE_INODE_EVENT(xfs_evict_inode);
DEFINE_INODE_EVENT(xfs_update_time);

DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
DECLARE_EVENT_CLASS(xfs_iref_class,
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
	TP_ARGS(ip, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, count)
		__field(int, pincount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->count = atomic_read(&VFS_I(ip)->i_count);
		__entry->pincount = atomic_read(&ip->i_pincount);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->count,
		  __entry->pincount,
		  (char *)__entry->caller_ip)
)

#define DEFINE_IREF_EVENT(name) \
DEFINE_EVENT(xfs_iref_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
	TP_ARGS(ip, caller_ip))
DEFINE_IREF_EVENT(xfs_ihold);
DEFINE_IREF_EVENT(xfs_irele);
DEFINE_IREF_EVENT(xfs_inode_pin);
DEFINE_IREF_EVENT(xfs_inode_unpin);
DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);
DECLARE_EVENT_CLASS(xfs_namespace_class,
	TP_PROTO(struct xfs_inode *dp, struct xfs_name *name),
	TP_ARGS(dp, name),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, dp_ino)
		__field(int, namelen)
		__dynamic_array(char, name, name->len)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(dp)->i_sb->s_dev;
		__entry->dp_ino = dp->i_ino;
		__entry->namelen = name->len;
		memcpy(__get_str(name), name->name, name->len);
	),
	TP_printk("dev %d:%d dp ino 0x%llx name %.*s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dp_ino,
		  __entry->namelen,
		  __get_str(name))
)

#define DEFINE_NAMESPACE_EVENT(name) \
DEFINE_EVENT(xfs_namespace_class, name, \
	TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \
	TP_ARGS(dp, name))
DEFINE_NAMESPACE_EVENT(xfs_remove);
DEFINE_NAMESPACE_EVENT(xfs_link);
DEFINE_NAMESPACE_EVENT(xfs_lookup);
DEFINE_NAMESPACE_EVENT(xfs_create);
DEFINE_NAMESPACE_EVENT(xfs_symlink);
TRACE_EVENT(xfs_rename,
	TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
		 struct xfs_name *src_name, struct xfs_name *target_name),
	TP_ARGS(src_dp, target_dp, src_name, target_name),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, src_dp_ino)
		__field(xfs_ino_t, target_dp_ino)
		__field(int, src_namelen)
		__field(int, target_namelen)
		__dynamic_array(char, src_name, src_name->len)
		__dynamic_array(char, target_name, target_name->len)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(src_dp)->i_sb->s_dev;
		__entry->src_dp_ino = src_dp->i_ino;
		__entry->target_dp_ino = target_dp->i_ino;
		__entry->src_namelen = src_name->len;
		__entry->target_namelen = target_name->len;
		memcpy(__get_str(src_name), src_name->name, src_name->len);
		memcpy(__get_str(target_name), target_name->name,
			target_name->len);
	),
	TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx "
		  "src name %.*s target name %.*s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->src_dp_ino,
		  __entry->target_dp_ino,
		  __entry->src_namelen,
		  __get_str(src_name),
		  __entry->target_namelen,
		  __get_str(target_name))
)
DECLARE_EVENT_CLASS(xfs_dquot_class,
	TP_PROTO(struct xfs_dquot *dqp),
	TP_ARGS(dqp),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u32, id)
		__field(unsigned, flags)
		__field(unsigned, nrefs)
		__field(unsigned long long, res_bcount)
		__field(unsigned long long, bcount)
		__field(unsigned long long, icount)
		__field(unsigned long long, blk_hardlimit)
		__field(unsigned long long, blk_softlimit)
		__field(unsigned long long, ino_hardlimit)
		__field(unsigned long long, ino_softlimit)
	),
	TP_fast_assign(
		__entry->dev = dqp->q_mount->m_super->s_dev;
		__entry->id = be32_to_cpu(dqp->q_core.d_id);
		__entry->flags = dqp->dq_flags;
		__entry->nrefs = dqp->q_nrefs;
		__entry->res_bcount = dqp->q_res_bcount;
		__entry->bcount = be64_to_cpu(dqp->q_core.d_bcount);
		__entry->icount = be64_to_cpu(dqp->q_core.d_icount);
		__entry->blk_hardlimit =
			be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		__entry->blk_softlimit =
			be64_to_cpu(dqp->q_core.d_blk_softlimit);
		__entry->ino_hardlimit =
			be64_to_cpu(dqp->q_core.d_ino_hardlimit);
		__entry->ino_softlimit =
			be64_to_cpu(dqp->q_core.d_ino_softlimit);
	),
	TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx "
		  "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx "
		  "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->id,
		  __print_flags(__entry->flags, "|", XFS_DQ_FLAGS),
		  __entry->nrefs,
		  __entry->res_bcount,
		  __entry->bcount,
		  __entry->blk_hardlimit,
		  __entry->blk_softlimit,
		  __entry->icount,
		  __entry->ino_hardlimit,
		  __entry->ino_softlimit)
)

#define DEFINE_DQUOT_EVENT(name) \
DEFINE_EVENT(xfs_dquot_class, name, \
	TP_PROTO(struct xfs_dquot *dqp), \
	TP_ARGS(dqp))
DEFINE_DQUOT_EVENT(xfs_dqadjust);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_done);
DEFINE_DQUOT_EVENT(xfs_dqattach_found);
DEFINE_DQUOT_EVENT(xfs_dqattach_get);
DEFINE_DQUOT_EVENT(xfs_dqalloc);
DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
DEFINE_DQUOT_EVENT(xfs_dqread);
DEFINE_DQUOT_EVENT(xfs_dqread_fail);
DEFINE_DQUOT_EVENT(xfs_dqget_hit);
DEFINE_DQUOT_EVENT(xfs_dqget_miss);
DEFINE_DQUOT_EVENT(xfs_dqget_freeing);
DEFINE_DQUOT_EVENT(xfs_dqget_dup);
DEFINE_DQUOT_EVENT(xfs_dqput);
DEFINE_DQUOT_EVENT(xfs_dqput_wait);
DEFINE_DQUOT_EVENT(xfs_dqput_free);
DEFINE_DQUOT_EVENT(xfs_dqrele);
DEFINE_DQUOT_EVENT(xfs_dqflush);
DEFINE_DQUOT_EVENT(xfs_dqflush_force);
DEFINE_DQUOT_EVENT(xfs_dqflush_done);
DECLARE_EVENT_CLASS(xfs_loggrant_class,
	TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
	TP_ARGS(log, tic),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned, trans_type)
		__field(char, ocnt)
		__field(char, cnt)
		__field(int, curr_res)
		__field(int, unit_res)
		__field(unsigned int, flags)
		__field(int, reserveq)
		__field(int, writeq)
		__field(int, grant_reserve_cycle)
		__field(int, grant_reserve_bytes)
		__field(int, grant_write_cycle)
		__field(int, grant_write_bytes)
		__field(int, curr_cycle)
		__field(int, curr_block)
		__field(xfs_lsn_t, tail_lsn)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->trans_type = tic->t_trans_type;
		__entry->ocnt = tic->t_ocnt;
		__entry->cnt = tic->t_cnt;
		__entry->curr_res = tic->t_curr_res;
		__entry->unit_res = tic->t_unit_res;
		__entry->flags = tic->t_flags;
		__entry->reserveq = list_empty(&log->l_reserve_head.waiters);
		__entry->writeq = list_empty(&log->l_write_head.waiters);
		xlog_crack_grant_head(&log->l_reserve_head.grant,
				&__entry->grant_reserve_cycle,
				&__entry->grant_reserve_bytes);
		xlog_crack_grant_head(&log->l_write_head.grant,
				&__entry->grant_write_cycle,
				&__entry->grant_write_bytes);
		__entry->curr_cycle = log->l_curr_cycle;
		__entry->curr_block = log->l_curr_block;
		__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
	),
	TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
		  "t_unit_res %u t_flags %s reserveq %s "
		  "writeq %s grant_reserve_cycle %d "
		  "grant_reserve_bytes %d grant_write_cycle %d "
		  "grant_write_bytes %d curr_cycle %d curr_block %d "
		  "tail_cycle %d tail_block %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
		  __entry->ocnt,
		  __entry->cnt,
		  __entry->curr_res,
		  __entry->unit_res,
		  __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
		  __entry->reserveq ? "empty" : "active",
		  __entry->writeq ? "empty" : "active",
		  __entry->grant_reserve_cycle,
		  __entry->grant_reserve_bytes,
		  __entry->grant_write_cycle,
		  __entry->grant_write_bytes,
		  __entry->curr_cycle,
		  __entry->curr_block,
		  CYCLE_LSN(__entry->tail_lsn),
		  BLOCK_LSN(__entry->tail_lsn)
	)
)

#define DEFINE_LOGGRANT_EVENT(name) \
DEFINE_EVENT(xfs_loggrant_class, name, \
	TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
	TP_ARGS(log, tic))
DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
DECLARE_EVENT_CLASS(xfs_log_item_class,
	TP_PROTO(struct xfs_log_item *lip),
	TP_ARGS(lip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(void *, lip)
		__field(uint, type)
		__field(uint, flags)
		__field(xfs_lsn_t, lsn)
	),
	TP_fast_assign(
		__entry->dev = lip->li_mountp->m_super->s_dev;
		__entry->lip = lip;
		__entry->type = lip->li_type;
		__entry->flags = lip->li_flags;
		__entry->lsn = lip->li_lsn;
	),
	TP_printk("dev %d:%d lip 0x%p lsn %d/%d type %s flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->lip,
		  CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
		  __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
)

TRACE_EVENT(xfs_log_force,
	TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn),
	TP_ARGS(mp, lsn),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_lsn_t, lsn)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->lsn = lsn;
	),
	TP_printk("dev %d:%d lsn 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->lsn)
)

#define DEFINE_LOG_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_log_item_class, name, \
	TP_PROTO(struct xfs_log_item *lip), \
	TP_ARGS(lip))
DEFINE_LOG_ITEM_EVENT(xfs_ail_push);
DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned);
DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);
DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing);

DECLARE_EVENT_CLASS(xfs_file_class,
	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
	TP_ARGS(ip, count, offset, flags),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(loff_t, offset)
		__field(size_t, count)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->offset = offset;
		__entry->count = count;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx "
		  "offset 0x%llx count 0x%zx ioflags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->offset,
		  __entry->count,
		  __print_flags(__entry->flags, "|", XFS_IO_FLAGS))
)

#define DEFINE_RW_EVENT(name) \
DEFINE_EVENT(xfs_file_class, name, \
	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
	TP_ARGS(ip, count, offset, flags))
DEFINE_RW_EVENT(xfs_file_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_splice_read);
DEFINE_RW_EVENT(xfs_file_splice_write);
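/*
 * The read/write events capture the I/O size and file offset together
 * with the on-disk inode size at the time of the call; a buffered write
 * path might emit roughly (illustrative only, not part of this header):
 *
 *	trace_xfs_file_buffered_write(ip, count, pos, ioflags);
 */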
DECLARE_EVENT_CLASS(xfs_page_class,
	TP_PROTO(struct inode *inode, struct page *page, unsigned long off),
	TP_ARGS(inode, page, off),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(pgoff_t, pgoff)
		__field(loff_t, size)
		__field(unsigned long, offset)
		__field(int, delalloc)
		__field(int, unwritten)
	),
	TP_fast_assign(
		int delalloc = -1, unwritten = -1;

		if (page_has_buffers(page))
			xfs_count_page_state(page, &delalloc, &unwritten);
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = XFS_I(inode)->i_ino;
		__entry->pgoff = page_offset(page);
		__entry->size = i_size_read(inode);
		__entry->offset = off;
		__entry->delalloc = delalloc;
		__entry->unwritten = unwritten;
	),
	TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
		  "delalloc %d unwritten %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->pgoff,
		  __entry->size,
		  __entry->offset,
		  __entry->delalloc,
		  __entry->unwritten)
)

#define DEFINE_PAGE_EVENT(name) \
DEFINE_EVENT(xfs_page_class, name, \
	TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
	TP_ARGS(inode, page, off))
DEFINE_PAGE_EVENT(xfs_writepage);
DEFINE_PAGE_EVENT(xfs_releasepage);
DEFINE_PAGE_EVENT(xfs_invalidatepage);
DECLARE_EVENT_CLASS(xfs_imap_class,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
		 int type, struct xfs_bmbt_irec *irec),
	TP_ARGS(ip, offset, count, type, irec),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(loff_t, size)
		__field(loff_t, offset)
		__field(size_t, count)
		__field(int, type)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->offset = offset;
		__entry->count = count;
		__entry->type = type;
		__entry->startoff = irec ? irec->br_startoff : 0;
		__entry->startblock = irec ? irec->br_startblock : 0;
		__entry->blockcount = irec ? irec->br_blockcount : 0;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx count %zd "
		  "type %s startoff 0x%llx startblock %lld blockcount 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->offset,
		  __entry->count,
		  __print_symbolic(__entry->type, XFS_IO_TYPES),
		  __entry->startoff,
		  (__int64_t)__entry->startblock,
		  __entry->blockcount)
)

#define DEFINE_IOMAP_EVENT(name) \
DEFINE_EVENT(xfs_imap_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
		 int type, struct xfs_bmbt_irec *irec), \
	TP_ARGS(ip, offset, count, type, irec))
DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
DECLARE_EVENT_CLASS(xfs_simple_io_class,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
	TP_ARGS(ip, offset, count),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(loff_t, isize)
		__field(loff_t, disize)
		__field(loff_t, offset)
		__field(size_t, count)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->isize = VFS_I(ip)->i_size;
		__entry->disize = ip->i_d.di_size;
		__entry->offset = offset;
		__entry->count = count;
	),
	TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx "
		  "offset 0x%llx count %zd",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->isize,
		  __entry->disize,
		  __entry->offset,
		  __entry->count)
);

#define DEFINE_SIMPLE_IO_EVENT(name) \
DEFINE_EVENT(xfs_simple_io_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
	TP_ARGS(ip, offset, count))
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
DECLARE_EVENT_CLASS(xfs_itrunc_class,
	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
	TP_ARGS(ip, new_size),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fsize_t, new_size)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->new_size = new_size;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->new_size)
)

#define DEFINE_ITRUNC_EVENT(name) \
DEFINE_EVENT(xfs_itrunc_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
	TP_ARGS(ip, new_size))
DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_start);
DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_end);
TRACE_EVENT(xfs_pagecache_inval,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
	TP_ARGS(ip, start, finish),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_off_t, start)
		__field(xfs_off_t, finish)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->start = start;
		__entry->finish = finish;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->start,
		  __entry->finish)
);

TRACE_EVENT(xfs_bunmap,
	TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
		 int flags, unsigned long caller_ip),
	TP_ARGS(ip, bno, len, flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fileoff_t, bno)
		__field(xfs_filblks_t, len)
		__field(unsigned long, caller_ip)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->bno = bno;
		__entry->len = len;
		__entry->caller_ip = caller_ip;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx "
		  "flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->bno,
		  __entry->len,
		  __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
		  (void *)__entry->caller_ip)
);
DECLARE_EVENT_CLASS(xfs_extent_busy_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
		 xfs_agblock_t agbno, xfs_extlen_t len),
	TP_ARGS(mp, agno, agbno, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
__field ( xfs_agblock_t , agbno )
__field ( xfs_extlen_t , len )
2009-12-15 02:14:59 +03:00
) ,
TP_fast_assign (
__entry - > dev = mp - > m_super - > s_dev ;
__entry - > agno = agno ;
2010-05-21 06:07:08 +04:00
__entry - > agbno = agbno ;
__entry - > len = len ;
2009-12-15 02:14:59 +03:00
) ,
2010-05-21 06:07:08 +04:00
TP_printk ( " dev %d:%d agno %u agbno %u len %u " ,
2009-12-15 02:14:59 +03:00
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > agno ,
2010-05-21 06:07:08 +04:00
__entry - > agbno ,
__entry - > len )
2009-12-15 02:14:59 +03:00
) ;
2011-04-24 23:06:16 +04:00
# define DEFINE_BUSY_EVENT(name) \
2012-04-29 14:41:10 +04:00
DEFINE_EVENT ( xfs_extent_busy_class , name , \
2011-04-24 23:06:16 +04:00
TP_PROTO ( struct xfs_mount * mp , xfs_agnumber_t agno , \
xfs_agblock_t agbno , xfs_extlen_t len ) , \
TP_ARGS ( mp , agno , agbno , len ) )
2012-04-29 14:41:10 +04:00
DEFINE_BUSY_EVENT ( xfs_extent_busy ) ;
DEFINE_BUSY_EVENT ( xfs_extent_busy_enomem ) ;
DEFINE_BUSY_EVENT ( xfs_extent_busy_force ) ;
DEFINE_BUSY_EVENT ( xfs_extent_busy_reuse ) ;
DEFINE_BUSY_EVENT ( xfs_extent_busy_clear ) ;
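Each DEFINE_EVENT() above generates a trace_<name>() function whose arguments are those of the class TP_PROTO(), so the busy extent code emits these events with a single call at the relevant site. A hedged sketch of such call sites (the surrounding context is illustrative; only the trace function names and signatures follow from the definitions above):

	/* an extent has just been marked busy in the per-AG tree */
	trace_xfs_extent_busy(mp, agno, bno, len);

	/* an allocation is about to reuse a range that is still busy */
	trace_xfs_extent_busy_reuse(mp, agno, bno, len);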
2010-05-21 06:07:08 +04:00
2012-04-29 14:41:10 +04:00
TRACE_EVENT ( xfs_extent_busy_trim ,
2011-04-24 23:06:15 +04:00
TP_PROTO ( struct xfs_mount * mp , xfs_agnumber_t agno ,
xfs_agblock_t agbno , xfs_extlen_t len ,
xfs_agblock_t tbno , xfs_extlen_t tlen ) ,
TP_ARGS ( mp , agno , agbno , len , tbno , tlen ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( xfs_agnumber_t , agno )
__field ( xfs_agblock_t , agbno )
__field ( xfs_extlen_t , len )
__field ( xfs_agblock_t , tbno )
__field ( xfs_extlen_t , tlen )
) ,
TP_fast_assign (
__entry - > dev = mp - > m_super - > s_dev ;
__entry - > agno = agno ;
__entry - > agbno = agbno ;
__entry - > len = len ;
__entry - > tbno = tbno ;
__entry - > tlen = tlen ;
) ,
TP_printk ( " dev %d:%d agno %u agbno %u len %u tbno %u tlen %u " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > agno ,
__entry - > agbno ,
__entry - > len ,
__entry - > tbno ,
__entry - > tlen )
) ;
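The trim event above records both the candidate free extent (agbno/len) and the result after it has been cut back to avoid a busy range (tbno/tlen). The following is a minimal sketch of that trimming arithmetic for a single overlapping busy extent; the helper name busy_trim_one and the keep-the-larger-piece policy are assumptions for illustration, not the exact XFS logic.

/*
 * Sketch only: trim the candidate free extent [bno, bno + len) so it
 * does not overlap the busy extent [busy_bno, busy_bno + busy_len).
 * Assumes the two ranges overlap, and keeps whichever non-overlapping
 * piece is larger.
 */
static void
busy_trim_one(
	xfs_agblock_t	bno,
	xfs_extlen_t	len,
	xfs_agblock_t	busy_bno,
	xfs_extlen_t	busy_len,
	xfs_agblock_t	*tbno,
	xfs_extlen_t	*tlen)
{
	xfs_agblock_t	end = bno + len;
	xfs_agblock_t	busy_end = busy_bno + busy_len;
	xfs_extlen_t	head = busy_bno > bno ? busy_bno - bno : 0;
	xfs_extlen_t	tail = busy_end < end ? end - busy_end : 0;

	if (head >= tail) {
		*tbno = bno;		/* keep the piece before the busy range */
		*tlen = head;
	} else {
		*tbno = busy_end;	/* keep the piece after the busy range */
		*tlen = tail;
	}
}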
2010-05-21 06:07:08 +04:00
TRACE_EVENT ( xfs_trans_commit_lsn ,
TP_PROTO ( struct xfs_trans * trans ) ,
TP_ARGS ( trans ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( struct xfs_trans * , tp )
__field ( xfs_lsn_t , lsn )
) ,
TP_fast_assign (
__entry - > dev = trans - > t_mountp - > m_super - > s_dev ;
__entry - > tp = trans ;
__entry - > lsn = trans - > t_commit_lsn ;
) ,
TP_printk ( " dev %d:%d trans 0x%p commit_lsn 0x%llx " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > tp ,
2010-01-02 05:38:56 +03:00
__entry - > lsn )
2009-12-15 02:14:59 +03:00
) ;
TRACE_EVENT ( xfs_agf ,
TP_PROTO ( struct xfs_mount * mp , struct xfs_agf * agf , int flags ,
unsigned long caller_ip ) ,
TP_ARGS ( mp , agf , flags , caller_ip ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( xfs_agnumber_t , agno )
__field ( int , flags )
__field ( __u32 , length )
__field ( __u32 , bno_root )
__field ( __u32 , cnt_root )
__field ( __u32 , bno_level )
__field ( __u32 , cnt_level )
__field ( __u32 , flfirst )
__field ( __u32 , fllast )
__field ( __u32 , flcount )
__field ( __u32 , freeblks )
__field ( __u32 , longest )
__field ( unsigned long , caller_ip )
) ,
TP_fast_assign (
__entry - > dev = mp - > m_super - > s_dev ;
__entry - > agno = be32_to_cpu ( agf - > agf_seqno ) ;
__entry - > flags = flags ;
__entry - > length = be32_to_cpu ( agf - > agf_length ) ;
__entry - > bno_root = be32_to_cpu ( agf - > agf_roots [ XFS_BTNUM_BNO ] ) ;
__entry - > cnt_root = be32_to_cpu ( agf - > agf_roots [ XFS_BTNUM_CNT ] ) ;
__entry - > bno_level =
be32_to_cpu ( agf - > agf_levels [ XFS_BTNUM_BNO ] ) ;
__entry - > cnt_level =
be32_to_cpu ( agf - > agf_levels [ XFS_BTNUM_CNT ] ) ;
__entry - > flfirst = be32_to_cpu ( agf - > agf_flfirst ) ;
__entry - > fllast = be32_to_cpu ( agf - > agf_fllast ) ;
__entry - > flcount = be32_to_cpu ( agf - > agf_flcount ) ;
__entry - > freeblks = be32_to_cpu ( agf - > agf_freeblks ) ;
__entry - > longest = be32_to_cpu ( agf - > agf_longest ) ;
__entry - > caller_ip = caller_ip ;
) ,
TP_printk ( " dev %d:%d agno %u flags %s length %u roots b %u c %u "
" levels b %u c %u flfirst %u fllast %u flcount %u "
" freeblks %u longest %u caller %pf " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > agno ,
__print_flags ( __entry - > flags , " | " , XFS_AGF_FLAGS ) ,
__entry - > length ,
__entry - > bno_root ,
__entry - > cnt_root ,
__entry - > bno_level ,
__entry - > cnt_level ,
__entry - > flfirst ,
__entry - > fllast ,
__entry - > flcount ,
__entry - > freeblks ,
__entry - > longest ,
( void * ) __entry - > caller_ip )
) ;
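The caller_ip field is printed with %pf, so each xfs_agf event shows which function logged the AGF. A hedged sketch of a typical call site, assuming a helper that logs AGF fields into a transaction (the surrounding helper is illustrative; trace_xfs_agf() is the generated tracepoint and _RET_IP_ is the standard kernel macro for the caller's return address):

	/* record which AGF fields were dirtied and who asked for it */
	trace_xfs_agf(mp, agf, flags, _RET_IP_);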
TRACE_EVENT ( xfs_free_extent ,
TP_PROTO ( struct xfs_mount * mp , xfs_agnumber_t agno , xfs_agblock_t agbno ,
xfs_extlen_t len , bool isfl , int haveleft , int haveright ) ,
TP_ARGS ( mp , agno , agbno , len , isfl , haveleft , haveright ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( xfs_agnumber_t , agno )
__field ( xfs_agblock_t , agbno )
__field ( xfs_extlen_t , len )
__field ( int , isfl )
__field ( int , haveleft )
__field ( int , haveright )
) ,
TP_fast_assign (
__entry - > dev = mp - > m_super - > s_dev ;
__entry - > agno = agno ;
__entry - > agbno = agbno ;
__entry - > len = len ;
__entry - > isfl = isfl ;
__entry - > haveleft = haveleft ;
__entry - > haveright = haveright ;
) ,
TP_printk ( " dev %d:%d agno %u agbno %u len %u isfl %d %s " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > agno ,
__entry - > agbno ,
__entry - > len ,
__entry - > isfl ,
__entry - > haveleft ?
( __entry - > haveright ? " both " : " left " ) :
( __entry - > haveright ? " right " : " none " ) )
) ;
2009-12-21 17:03:03 +03:00
DECLARE_EVENT_CLASS ( xfs_alloc_class ,
TP_PROTO ( struct xfs_alloc_arg * args ) ,
TP_ARGS ( args ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( xfs_agnumber_t , agno )
__field ( xfs_agblock_t , agbno )
__field ( xfs_extlen_t , minlen )
__field ( xfs_extlen_t , maxlen )
__field ( xfs_extlen_t , mod )
__field ( xfs_extlen_t , prod )
__field ( xfs_extlen_t , minleft )
__field ( xfs_extlen_t , total )
__field ( xfs_extlen_t , alignment )
__field ( xfs_extlen_t , minalignslop )
__field ( xfs_extlen_t , len )
__field ( short , type )
__field ( short , otype )
__field ( char , wasdel )
__field ( char , wasfromfl )
__field ( char , isfl )
__field ( char , userdata )
__field ( xfs_fsblock_t , firstblock )
) ,
TP_fast_assign (
__entry - > dev = args - > mp - > m_super - > s_dev ;
__entry - > agno = args - > agno ;
__entry - > agbno = args - > agbno ;
__entry - > minlen = args - > minlen ;
__entry - > maxlen = args - > maxlen ;
__entry - > mod = args - > mod ;
__entry - > prod = args - > prod ;
__entry - > minleft = args - > minleft ;
__entry - > total = args - > total ;
__entry - > alignment = args - > alignment ;
__entry - > minalignslop = args - > minalignslop ;
__entry - > len = args - > len ;
__entry - > type = args - > type ;
__entry - > otype = args - > otype ;
__entry - > wasdel = args - > wasdel ;
__entry - > wasfromfl = args - > wasfromfl ;
__entry - > isfl = args - > isfl ;
__entry - > userdata = args - > userdata ;
__entry - > firstblock = args - > firstblock ;
) ,
TP_printk ( " dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
" prod %u minleft %u total %u alignment %u minalignslop %u "
" len %u type %s otype %s wasdel %d wasfromfl %d isfl %d "
" userdata %d firstblock 0x%llx " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > agno ,
__entry - > agbno ,
__entry - > minlen ,
__entry - > maxlen ,
__entry - > mod ,
__entry - > prod ,
__entry - > minleft ,
__entry - > total ,
__entry - > alignment ,
__entry - > minalignslop ,
__entry - > len ,
__print_symbolic ( __entry - > type , XFS_ALLOC_TYPES ) ,
__print_symbolic ( __entry - > otype , XFS_ALLOC_TYPES ) ,
__entry - > wasdel ,
__entry - > wasfromfl ,
__entry - > isfl ,
__entry - > userdata ,
2011-04-24 23:02:58 +04:00
( unsigned long long ) __entry - > firstblock )
2009-12-15 02:14:59 +03:00
)
2009-12-21 17:03:03 +03:00
# define DEFINE_ALLOC_EVENT(name) \
DEFINE_EVENT ( xfs_alloc_class , name , \
TP_PROTO ( struct xfs_alloc_arg * args ) , \
TP_ARGS ( args ) )
2009-12-15 02:14:59 +03:00
DEFINE_ALLOC_EVENT ( xfs_alloc_exact_done ) ;
2010-12-10 18:03:57 +03:00
DEFINE_ALLOC_EVENT ( xfs_alloc_exact_notfound ) ;
2009-12-15 02:14:59 +03:00
DEFINE_ALLOC_EVENT ( xfs_alloc_exact_error ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_near_nominleft ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_near_first ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_near_greater ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_near_lesser ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_near_error ) ;
2011-04-24 23:06:15 +04:00
DEFINE_ALLOC_EVENT ( xfs_alloc_near_noentry ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_near_busy ) ;
2009-12-15 02:14:59 +03:00
DEFINE_ALLOC_EVENT ( xfs_alloc_size_neither ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_size_noentry ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_size_nominleft ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_size_done ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_size_error ) ;
2011-04-24 23:06:15 +04:00
DEFINE_ALLOC_EVENT ( xfs_alloc_size_busy ) ;
2009-12-15 02:14:59 +03:00
DEFINE_ALLOC_EVENT ( xfs_alloc_small_freelist ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_small_notenough ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_small_done ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_small_error ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_vextent_badargs ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_vextent_nofix ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_vextent_noagbp ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_vextent_loopfailed ) ;
DEFINE_ALLOC_EVENT ( xfs_alloc_vextent_allfailed ) ;
2012-03-22 09:15:13 +04:00
DECLARE_EVENT_CLASS ( xfs_da_class ,
2009-12-21 17:03:03 +03:00
TP_PROTO ( struct xfs_da_args * args ) ,
TP_ARGS ( args ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( xfs_ino_t , ino )
__dynamic_array ( char , name , args - > namelen )
__field ( int , namelen )
__field ( xfs_dahash_t , hashval )
__field ( xfs_ino_t , inumber )
__field ( int , op_flags )
) ,
TP_fast_assign (
__entry - > dev = VFS_I ( args - > dp ) - > i_sb - > s_dev ;
__entry - > ino = args - > dp - > i_ino ;
if ( args - > namelen )
memcpy ( __get_str ( name ) , args - > name , args - > namelen ) ;
__entry - > namelen = args - > namelen ;
__entry - > hashval = args - > hashval ;
__entry - > inumber = args - > inumber ;
__entry - > op_flags = args - > op_flags ;
) ,
TP_printk ( " dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x "
" inumber 0x%llx op_flags %s " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > ino ,
__entry - > namelen ,
__entry - > namelen ? __get_str ( name ) : NULL ,
__entry - > namelen ,
__entry - > hashval ,
__entry - > inumber ,
__print_flags ( __entry - > op_flags , " | " , XFS_DA_OP_FLAGS ) )
)
# define DEFINE_DIR2_EVENT(name) \
2012-03-22 09:15:13 +04:00
DEFINE_EVENT ( xfs_da_class , name , \
2009-12-15 02:14:59 +03:00
TP_PROTO ( struct xfs_da_args * args ) , \
2009-12-21 17:03:03 +03:00
TP_ARGS ( args ) )
DEFINE_DIR2_EVENT ( xfs_dir2_sf_addname ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_sf_create ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_sf_lookup ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_sf_replace ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_sf_removename ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_sf_toino4 ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_sf_toino8 ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_sf_to_block ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_block_addname ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_block_lookup ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_block_replace ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_block_removename ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_block_to_sf ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_block_to_leaf ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_leaf_addname ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_leaf_lookup ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_leaf_replace ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_leaf_removename ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_leaf_to_block ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_leaf_to_node ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_node_addname ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_node_lookup ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_node_replace ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_node_removename ) ;
DEFINE_DIR2_EVENT ( xfs_dir2_node_to_leaf ) ;
2012-03-22 09:15:13 +04:00
# define DEFINE_ATTR_EVENT(name) \
DEFINE_EVENT ( xfs_da_class , name , \
TP_PROTO ( struct xfs_da_args * args ) , \
TP_ARGS ( args ) )
DEFINE_ATTR_EVENT ( xfs_attr_sf_add ) ;
DEFINE_ATTR_EVENT ( xfs_attr_sf_addname ) ;
DEFINE_ATTR_EVENT ( xfs_attr_sf_create ) ;
DEFINE_ATTR_EVENT ( xfs_attr_sf_lookup ) ;
DEFINE_ATTR_EVENT ( xfs_attr_sf_remove ) ;
DEFINE_ATTR_EVENT ( xfs_attr_sf_removename ) ;
DEFINE_ATTR_EVENT ( xfs_attr_sf_to_leaf ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_add ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_add_old ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_add_new ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_addname ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_create ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_lookup ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_replace ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_removename ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_split ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_split_before ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_split_after ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_clearflag ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_setflag ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_flipflags ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_to_sf ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_to_node ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_rebalance ) ;
DEFINE_ATTR_EVENT ( xfs_attr_leaf_unbalance ) ;
DEFINE_ATTR_EVENT ( xfs_attr_node_addname ) ;
DEFINE_ATTR_EVENT ( xfs_attr_node_lookup ) ;
DEFINE_ATTR_EVENT ( xfs_attr_node_replace ) ;
DEFINE_ATTR_EVENT ( xfs_attr_node_removename ) ;
# define DEFINE_DA_EVENT(name) \
DEFINE_EVENT ( xfs_da_class , name , \
TP_PROTO ( struct xfs_da_args * args ) , \
TP_ARGS ( args ) )
DEFINE_DA_EVENT ( xfs_da_split ) ;
DEFINE_DA_EVENT ( xfs_da_join ) ;
DEFINE_DA_EVENT ( xfs_da_link_before ) ;
DEFINE_DA_EVENT ( xfs_da_link_after ) ;
DEFINE_DA_EVENT ( xfs_da_unlink_back ) ;
DEFINE_DA_EVENT ( xfs_da_unlink_forward ) ;
DEFINE_DA_EVENT ( xfs_da_root_split ) ;
DEFINE_DA_EVENT ( xfs_da_root_join ) ;
DEFINE_DA_EVENT ( xfs_da_node_add ) ;
DEFINE_DA_EVENT ( xfs_da_node_create ) ;
DEFINE_DA_EVENT ( xfs_da_node_split ) ;
DEFINE_DA_EVENT ( xfs_da_node_remove ) ;
DEFINE_DA_EVENT ( xfs_da_node_rebalance ) ;
DEFINE_DA_EVENT ( xfs_da_node_unbalance ) ;
DEFINE_DA_EVENT ( xfs_da_swap_lastblock ) ;
DEFINE_DA_EVENT ( xfs_da_grow_inode ) ;
DEFINE_DA_EVENT ( xfs_da_shrink_inode ) ;
2009-12-21 17:03:03 +03:00
DECLARE_EVENT_CLASS ( xfs_dir2_space_class ,
TP_PROTO ( struct xfs_da_args * args , int idx ) ,
TP_ARGS ( args , idx ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( xfs_ino_t , ino )
__field ( int , op_flags )
__field ( int , idx )
) ,
TP_fast_assign (
__entry - > dev = VFS_I ( args - > dp ) - > i_sb - > s_dev ;
__entry - > ino = args - > dp - > i_ino ;
__entry - > op_flags = args - > op_flags ;
__entry - > idx = idx ;
) ,
TP_printk ( " dev %d:%d ino 0x%llx op_flags %s index %d " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > ino ,
__print_flags ( __entry - > op_flags , " | " , XFS_DA_OP_FLAGS ) ,
__entry - > idx )
2009-12-15 02:14:59 +03:00
)
2009-12-21 17:03:03 +03:00
# define DEFINE_DIR2_SPACE_EVENT(name) \
DEFINE_EVENT ( xfs_dir2_space_class , name , \
2009-12-15 02:14:59 +03:00
TP_PROTO ( struct xfs_da_args * args , int idx ) , \
2009-12-21 17:03:03 +03:00
TP_ARGS ( args , idx ) )
DEFINE_DIR2_SPACE_EVENT ( xfs_dir2_leafn_add ) ;
DEFINE_DIR2_SPACE_EVENT ( xfs_dir2_leafn_remove ) ;
DEFINE_DIR2_SPACE_EVENT ( xfs_dir2_grow_inode ) ;
DEFINE_DIR2_SPACE_EVENT ( xfs_dir2_shrink_inode ) ;
2009-12-15 02:14:59 +03:00
TRACE_EVENT ( xfs_dir2_leafn_moveents ,
TP_PROTO ( struct xfs_da_args * args , int src_idx , int dst_idx , int count ) ,
TP_ARGS ( args , src_idx , dst_idx , count ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( xfs_ino_t , ino )
__field ( int , op_flags )
__field ( int , src_idx )
__field ( int , dst_idx )
__field ( int , count )
) ,
TP_fast_assign (
__entry - > dev = VFS_I ( args - > dp ) - > i_sb - > s_dev ;
__entry - > ino = args - > dp - > i_ino ;
__entry - > op_flags = args - > op_flags ;
__entry - > src_idx = src_idx ;
__entry - > dst_idx = dst_idx ;
__entry - > count = count ;
) ,
TP_printk ( " dev %d:%d ino 0x%llx op_flags %s "
" src_idx %d dst_idx %d count %d " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > ino ,
__print_flags ( __entry - > op_flags , " | " , XFS_DA_OP_FLAGS ) ,
__entry - > src_idx ,
__entry - > dst_idx ,
__entry - > count )
) ;
2010-01-14 04:33:55 +03:00
# define XFS_SWAPEXT_INODES \
{ 0 , " target " } , \
{ 1 , " temp " }
# define XFS_INODE_FORMAT_STR \
{ 0 , " invalid " } , \
{ 1 , " local " } , \
{ 2 , " extent " } , \
{ 3 , " btree " }
DECLARE_EVENT_CLASS ( xfs_swap_extent_class ,
TP_PROTO ( struct xfs_inode * ip , int which ) ,
TP_ARGS ( ip , which ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( int , which )
__field ( xfs_ino_t , ino )
__field ( int , format )
__field ( int , nex )
__field ( int , broot_size )
__field ( int , fork_off )
) ,
TP_fast_assign (
__entry - > dev = VFS_I ( ip ) - > i_sb - > s_dev ;
__entry - > which = which ;
__entry - > ino = ip - > i_ino ;
__entry - > format = ip - > i_d . di_format ;
__entry - > nex = ip - > i_d . di_nextents ;
__entry - > broot_size = ip - > i_df . if_broot_bytes ;
__entry - > fork_off = XFS_IFORK_BOFF ( ip ) ;
) ,
TP_printk ( " dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
2011-12-19 00:00:07 +04:00
" broot size %d, fork offset %d " ,
2010-01-14 04:33:55 +03:00
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > ino ,
__print_symbolic ( __entry - > which , XFS_SWAPEXT_INODES ) ,
__print_symbolic ( __entry - > format , XFS_INODE_FORMAT_STR ) ,
__entry - > nex ,
__entry - > broot_size ,
__entry - > fork_off )
)
# define DEFINE_SWAPEXT_EVENT(name) \
DEFINE_EVENT ( xfs_swap_extent_class , name , \
TP_PROTO ( struct xfs_inode * ip , int which ) , \
TP_ARGS ( ip , which ) )
DEFINE_SWAPEXT_EVENT ( xfs_swap_extent_before ) ;
DEFINE_SWAPEXT_EVENT ( xfs_swap_extent_after ) ;
2010-04-13 09:06:46 +04:00
DECLARE_EVENT_CLASS ( xfs_log_recover_item_class ,
2012-06-14 18:22:15 +04:00
TP_PROTO ( struct xlog * log , struct xlog_recover * trans ,
2010-04-13 09:06:46 +04:00
struct xlog_recover_item * item , int pass ) ,
TP_ARGS ( log , trans , item , pass ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( unsigned long , item )
__field ( xlog_tid_t , tid )
__field ( int , type )
__field ( int , pass )
__field ( int , count )
__field ( int , total )
) ,
TP_fast_assign (
__entry - > dev = log - > l_mp - > m_super - > s_dev ;
__entry - > item = ( unsigned long ) item ;
__entry - > tid = trans - > r_log_tid ;
__entry - > type = ITEM_TYPE ( item ) ;
__entry - > pass = pass ;
__entry - > count = item - > ri_cnt ;
__entry - > total = item - > ri_total ;
) ,
TP_printk ( " dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s "
" item region count/total %d/%d " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > tid ,
__entry - > pass ,
( void * ) __entry - > item ,
__print_symbolic ( __entry - > type , XFS_LI_TYPE_DESC ) ,
__entry - > count ,
__entry - > total )
)
# define DEFINE_LOG_RECOVER_ITEM(name) \
DEFINE_EVENT ( xfs_log_recover_item_class , name , \
2012-06-14 18:22:15 +04:00
TP_PROTO ( struct xlog * log , struct xlog_recover * trans , \
2010-04-13 09:06:46 +04:00
struct xlog_recover_item * item , int pass ) , \
TP_ARGS ( log , trans , item , pass ) )
DEFINE_LOG_RECOVER_ITEM ( xfs_log_recover_item_add ) ;
DEFINE_LOG_RECOVER_ITEM ( xfs_log_recover_item_add_cont ) ;
DEFINE_LOG_RECOVER_ITEM ( xfs_log_recover_item_reorder_head ) ;
DEFINE_LOG_RECOVER_ITEM ( xfs_log_recover_item_reorder_tail ) ;
DEFINE_LOG_RECOVER_ITEM ( xfs_log_recover_item_recover ) ;
DECLARE_EVENT_CLASS ( xfs_log_recover_buf_item_class ,
2012-06-14 18:22:15 +04:00
TP_PROTO ( struct xlog * log , struct xfs_buf_log_format * buf_f ) ,
2010-04-13 09:06:46 +04:00
TP_ARGS ( log , buf_f ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( __int64_t , blkno )
__field ( unsigned short , len )
__field ( unsigned short , flags )
__field ( unsigned short , size )
__field ( unsigned int , map_size )
) ,
TP_fast_assign (
__entry - > dev = log - > l_mp - > m_super - > s_dev ;
__entry - > blkno = buf_f - > blf_blkno ;
__entry - > len = buf_f - > blf_len ;
__entry - > flags = buf_f - > blf_flags ;
__entry - > size = buf_f - > blf_size ;
__entry - > map_size = buf_f - > blf_map_size ;
) ,
TP_printk ( " dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, "
" map_size %d " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > blkno ,
__entry - > len ,
__entry - > flags ,
__entry - > size ,
__entry - > map_size )
)
# define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
DEFINE_EVENT ( xfs_log_recover_buf_item_class , name , \
2012-06-14 18:22:15 +04:00
TP_PROTO ( struct xlog * log , struct xfs_buf_log_format * buf_f ) , \
2010-04-13 09:06:46 +04:00
TP_ARGS ( log , buf_f ) )
DEFINE_LOG_RECOVER_BUF_ITEM ( xfs_log_recover_buf_not_cancel ) ;
DEFINE_LOG_RECOVER_BUF_ITEM ( xfs_log_recover_buf_cancel ) ;
DEFINE_LOG_RECOVER_BUF_ITEM ( xfs_log_recover_buf_cancel_add ) ;
DEFINE_LOG_RECOVER_BUF_ITEM ( xfs_log_recover_buf_cancel_ref_inc ) ;
DEFINE_LOG_RECOVER_BUF_ITEM ( xfs_log_recover_buf_recover ) ;
DEFINE_LOG_RECOVER_BUF_ITEM ( xfs_log_recover_buf_inode_buf ) ;
DEFINE_LOG_RECOVER_BUF_ITEM ( xfs_log_recover_buf_reg_buf ) ;
DEFINE_LOG_RECOVER_BUF_ITEM ( xfs_log_recover_buf_dquot_buf ) ;
DECLARE_EVENT_CLASS ( xfs_log_recover_ino_item_class ,
2012-06-14 18:22:15 +04:00
TP_PROTO ( struct xlog * log , struct xfs_inode_log_format * in_f ) ,
2010-04-13 09:06:46 +04:00
TP_ARGS ( log , in_f ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( xfs_ino_t , ino )
__field ( unsigned short , size )
__field ( int , fields )
__field ( unsigned short , asize )
__field ( unsigned short , dsize )
__field ( __int64_t , blkno )
__field ( int , len )
__field ( int , boffset )
) ,
TP_fast_assign (
__entry - > dev = log - > l_mp - > m_super - > s_dev ;
__entry - > ino = in_f - > ilf_ino ;
__entry - > size = in_f - > ilf_size ;
__entry - > fields = in_f - > ilf_fields ;
__entry - > asize = in_f - > ilf_asize ;
__entry - > dsize = in_f - > ilf_dsize ;
__entry - > blkno = in_f - > ilf_blkno ;
__entry - > len = in_f - > ilf_len ;
__entry - > boffset = in_f - > ilf_boffset ;
) ,
TP_printk ( " dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
" dsize %d, blkno 0x%llx, len %d, boffset %d " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > ino ,
__entry - > size ,
__entry - > fields ,
__entry - > asize ,
__entry - > dsize ,
__entry - > blkno ,
__entry - > len ,
__entry - > boffset )
)
# define DEFINE_LOG_RECOVER_INO_ITEM(name) \
DEFINE_EVENT ( xfs_log_recover_ino_item_class , name , \
2012-06-14 18:22:15 +04:00
TP_PROTO ( struct xlog * log , struct xfs_inode_log_format * in_f ) , \
2010-04-13 09:06:46 +04:00
TP_ARGS ( log , in_f ) )
DEFINE_LOG_RECOVER_INO_ITEM ( xfs_log_recover_inode_recover ) ;
DEFINE_LOG_RECOVER_INO_ITEM ( xfs_log_recover_inode_cancel ) ;
DEFINE_LOG_RECOVER_INO_ITEM ( xfs_log_recover_inode_skip ) ;
2011-01-07 16:02:04 +03:00
DECLARE_EVENT_CLASS ( xfs_discard_class ,
TP_PROTO ( struct xfs_mount * mp , xfs_agnumber_t agno ,
xfs_agblock_t agbno , xfs_extlen_t len ) ,
TP_ARGS ( mp , agno , agbno , len ) ,
TP_STRUCT__entry (
__field ( dev_t , dev )
__field ( xfs_agnumber_t , agno )
__field ( xfs_agblock_t , agbno )
__field ( xfs_extlen_t , len )
) ,
TP_fast_assign (
__entry - > dev = mp - > m_super - > s_dev ;
__entry - > agno = agno ;
__entry - > agbno = agbno ;
__entry - > len = len ;
) ,
TP_printk ( " dev %d:%d agno %u agbno %u len %u \n " ,
MAJOR ( __entry - > dev ) , MINOR ( __entry - > dev ) ,
__entry - > agno ,
__entry - > agbno ,
__entry - > len )
)
# define DEFINE_DISCARD_EVENT(name) \
DEFINE_EVENT ( xfs_discard_class , name , \
TP_PROTO ( struct xfs_mount * mp , xfs_agnumber_t agno , \
xfs_agblock_t agbno , xfs_extlen_t len ) , \
TP_ARGS ( mp , agno , agbno , len ) )
DEFINE_DISCARD_EVENT ( xfs_discard_extent ) ;
DEFINE_DISCARD_EVENT ( xfs_discard_toosmall ) ;
DEFINE_DISCARD_EVENT ( xfs_discard_exclude ) ;
DEFINE_DISCARD_EVENT ( xfs_discard_busy ) ;
2009-12-15 02:14:59 +03:00
# endif /* _TRACE_XFS_H */
# undef TRACE_INCLUDE_PATH
# define TRACE_INCLUDE_PATH .
# define TRACE_INCLUDE_FILE xfs_trace
# include <trace/define_trace.h>