/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHEFS_H

#include <linux/tracepoint.h>

#define TRACE_BPOS_entries(name)			\
	__field(u64,	name##_inode)			\
	__field(u64,	name##_offset)			\
	__field(u32,	name##_snapshot)

#define TRACE_BPOS_assign(dst, src)			\
	__entry->dst##_inode	= (src).inode;		\
	__entry->dst##_offset	= (src).offset;		\
	__entry->dst##_snapshot	= (src).snapshot
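
/*
 * A minimal sketch of what the two helpers above expand to when used as
 * TRACE_BPOS_entries(p) / TRACE_BPOS_assign(p, *p) inside an event
 * definition (illustrative only; the real expansion is driven by the
 * tracepoint macros pulled in via <trace/define_trace.h>):
 *
 *	__field(u64, p_inode)
 *	__field(u64, p_offset)
 *	__field(u32, p_snapshot)
 *
 *	__entry->p_inode	= (*p).inode;
 *	__entry->p_offset	= (*p).offset;
 *	__entry->p_snapshot	= (*p).snapshot;
 */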

DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u64,	inode)
		__field(u64,	offset)
		__field(u32,	size)
	),

	TP_fast_assign(
		__entry->inode	= k->p.inode;
		__entry->offset	= k->p.offset;
		__entry->size	= k->size;
	),

	TP_printk("%llu:%llu len %u", __entry->inode,
		  __entry->offset, __entry->size)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u8,		level)
		__field(u8,		btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);

DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(sector_t,	sector)
		__field(unsigned int,	nr_sector)
		__array(char,		rwbs,	6)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

/* super-io.c: */

TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(unsigned long,	ip)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *)__entry->ip)
);

/* io.c: */

DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Journal */

DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, journal_entry_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 prereserved, u64 prereserved_total,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(bool,		direct)
		__field(bool,		kicked)
		__field(u64,		min_nr)
		__field(u64,		min_key_cache)
		__field(u64,		prereserved)
		__field(u64,		prereserved_total)
		__field(u64,		btree_cache_dirty)
		__field(u64,		btree_cache_total)
		__field(u64,		btree_key_cache_dirty)
		__field(u64,		btree_key_cache_total)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->prereserved		= prereserved;
		__entry->prereserved_total	= prereserved_total;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->prereserved,
		  __entry->prereserved_total,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);

TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u64,		nr_flushed)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);

/* bset.c: */

DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
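
/*
 * For every TRACE_EVENT()/DEFINE_EVENT() in this header, the tracepoint
 * machinery generates a trace_<event_name>() wrapper taking the TP_PROTO()
 * arguments. A minimal, purely illustrative call site for the event above
 * might look like the sketch below; the packing helper and surrounding
 * logic are assumptions for illustration, only the generated
 * trace_bkey_pack_pos_fail() wrapper follows from the DEFINE_EVENT():
 *
 *	if (!bch2_bkey_pack_pos(&packed, pos, b))
 *		trace_bkey_pack_pos_fail(&pos);
 */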

/* Btree cache: */

TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan)
		__field(long,	can_free)
		__field(long,	ret)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

DEFINE_EVENT(btree_node, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Btree */

DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes)
		__field(unsigned,	sectors)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required),
	TP_ARGS(trans_fn, caller_ip, required),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
		__field(unsigned long,		caller_ip)
		__field(size_t,			required)
	),

	TP_fast_assign(
		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
	),

	TP_printk("%s %pS required %zu",
		  __entry->trans_fn,
		  (void *)__entry->caller_ip,
		  __entry->required)
);

DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_path_relock_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
		__field(unsigned long,		caller_ip)
		__field(u8,			btree_id)
		__field(u8,			level)
		TRACE_BPOS_entries(pos)
		__array(char,			node, 24)
		__field(u32,			iter_lock_seq)
		__field(u32,			node_lock_seq)
	),

	TP_fast_assign(
		struct btree *b = btree_path_node(path, level);

		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= path->level;
		TRACE_BPOS_assign(pos, path->pos);
		if (IS_ERR(b))
			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
		else
			scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *)__entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->node,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

TRACE_EVENT(btree_path_upgrade_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
		__field(unsigned long,		caller_ip)
		__field(u8,			btree_id)
		__field(u8,			level)
		TRACE_BPOS_entries(pos)
		__field(u8,			locked)
		__field(u8,			self_read_count)
		__field(u8,			self_intent_count)
		__field(u8,			read_count)
		__field(u8,			intent_count)
		__field(u32,			iter_lock_seq)
		__field(u32,			node_lock_seq)
	),

	TP_fast_assign(
		struct six_lock_count c;

		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->locked			= btree_node_locked(path, level);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
		c = six_lock_counts(&path->l[level].b->c.lock);
		__entry->read_count		= c.n[SIX_LOCK_read];
		__entry->intent_count		= c.n[SIX_LOCK_intent];
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *)__entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locked,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

/* Garbage collection */

DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Allocator */

DECLARE_EVENT_CLASS(bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 bool user,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 u64 seen,
		 u64 open,
		 u64 need_journal_commit,
		 u64 nouse,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, user, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		seen, open, need_journal_commit, nouse, nonblocking, err),

	TP_STRUCT__entry(
		__field(dev_t,			dev)
		__array(char,	reserve,	16)
		__field(bool,			user)
		__field(u64,			bucket)
		__field(u64,			free)
		__field(u64,			avail)
		__field(u64,			copygc_wait_amount)
		__field(s64,			copygc_waiting_for)
		__field(u64,			seen)
		__field(u64,			open)
		__field(u64,			need_journal_commit)
		__field(u64,			nouse)
		__field(bool,			nonblocking)
		__array(char,	err,		32)
	),

	TP_fast_assign(
		__entry->dev			= ca->dev;
		strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
		__entry->user			= user;
		__entry->bucket			= bucket;
		__entry->free			= free;
		__entry->avail			= avail;
		__entry->copygc_wait_amount	= copygc_wait_amount;
		__entry->copygc_waiting_for	= copygc_waiting_for;
		__entry->seen			= seen;
		__entry->open			= open;
		__entry->need_journal_commit	= need_journal_commit;
		__entry->nouse			= nouse;
		__entry->nonblocking		= nonblocking;
		strlcpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("%d,%d reserve %s user %u bucket %llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->reserve,
		  __entry->user,
		  __entry->bucket,
		  __entry->free,
		  __entry->avail,
		  __entry->copygc_wait_amount,
		  __entry->copygc_waiting_for,
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->nouse,
		  __entry->nonblocking,
		  __entry->err)
);
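
/*
 * A minimal, illustrative sketch of how the allocator might emit the
 * bucket_alloc_fail instance defined below. The argument order mirrors the
 * class TP_PROTO() above; every concrete value (reserve name, local
 * variables, bch2_err_str(ret)) is an assumption for illustration:
 *
 *	trace_bucket_alloc_fail(ca, "btree", false, 0,
 *				free, avail,
 *				copygc_wait_amount, copygc_waiting_for,
 *				seen, open, need_journal_commit, nouse,
 *				nonblocking,
 *				bch2_err_str(ret));
 */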

DEFINE_EVENT(bucket_alloc, bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 bool user,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 u64 seen,
		 u64 open,
		 u64 need_journal_commit,
		 u64 nouse,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, user, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		seen, open, need_journal_commit, nouse, nonblocking, err)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 bool user,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 u64 seen,
		 u64 open,
		 u64 need_journal_commit,
		 u64 nouse,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, user, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		seen, open, need_journal_commit, nouse, nonblocking, err)
);

TRACE_EVENT(discard_buckets,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u64,		seen)
		__field(u64,		open)
		__field(u64,		need_journal_commit)
		__field(u64,		discarded)
		__array(char,		err,	16)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->seen			= seen;
		__entry->open			= open;
		__entry->need_journal_commit	= need_journal_commit;
		__entry->discarded		= discarded;
		strlcpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->discarded,
		  __entry->err)
);

TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u32,		dev_idx)
		__field(u32,		sectors)
		__field(u64,		bucket)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);

/* Moving IO */

DEFINE_EVENT(bkey, move_extent_read,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, move_extent_write,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, move_extent_finish,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, move_extent_fail,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(move_data,
	TP_PROTO(struct bch_fs *c, u64 sectors_moved,
		 u64 keys_moved),
	TP_ARGS(c, sectors_moved, keys_moved),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u64,		sectors_moved)
		__field(u64,		keys_moved)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->sectors_moved	= sectors_moved;
		__entry->keys_moved	= keys_moved;
	),

	TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->sectors_moved, __entry->keys_moved)
);

TRACE_EVENT(copygc,
	TP_PROTO(struct bch_fs *c,
		 u64 sectors_moved, u64 sectors_not_moved,
		 u64 buckets_moved, u64 buckets_not_moved),
	TP_ARGS(c,
		sectors_moved, sectors_not_moved,
		buckets_moved, buckets_not_moved),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u64,		sectors_moved)
		__field(u64,		sectors_not_moved)
		__field(u64,		buckets_moved)
		__field(u64,		buckets_not_moved)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->sectors_moved		= sectors_moved;
		__entry->sectors_not_moved	= sectors_not_moved;
		__entry->buckets_moved		= buckets_moved;
		__entry->buckets_not_moved	= buckets_not_moved;
	),

	TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->sectors_moved, __entry->sectors_not_moved,
		  __entry->buckets_moved, __entry->buckets_not_moved)
);

TRACE_EVENT(copygc_wait,
	TP_PROTO(struct bch_fs *c,
		 u64 wait_amount, u64 until),
	TP_ARGS(c, wait_amount, until),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u64,		wait_amount)
		__field(u64,		until)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->wait_amount	= wait_amount;
		__entry->until		= until;
	),

	TP_printk("%d,%d waiting for %llu sectors until %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait_amount, __entry->until)
);

/* btree transactions: */

DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
		__field(unsigned long,		caller_ip)
	),

	TP_fast_assign(
		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *)__entry->caller_ip)
);

DEFINE_EVENT(transaction_event, transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_journal_res_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
		__field(unsigned long,		caller_ip)
		__field(unsigned,		flags)
	),

	TP_fast_assign(
		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *)__entry->caller_ip,
		  __entry->flags)
);

DEFINE_EVENT(transaction_event, trans_restart_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_mark_replicas,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
		__field(unsigned long,		caller_ip)
		__field(u8,			btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *)__entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
		__field(unsigned long,		caller_ip)
		__field(u8,			btree_id)
		__field(u8,			old_locks_want)
		__field(u8,			new_locks_want)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
		  __entry->trans_fn,
		  (void *)__entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/*
 * Deadlock cycle detector:
 *
 * We've outgrown our own deadlock avoidance strategy.
 *
 * The btree iterator API provides an interface where the user doesn't need
 * to concern themselves with lock ordering - different btree iterators can
 * be traversed in any order. Without special care, this will lead to
 * deadlocks.
 *
 * Our previous strategy was to define a lock ordering internally, and
 * whenever we attempt to take a lock and trylock() fails, we'd check if the
 * current btree transaction is holding any locks that cause a lock ordering
 * violation. If so, we'd issue a transaction restart, and then
 * bch2_trans_begin() would re-traverse all previously used iterators, but
 * in the correct order.
 *
 * That approach had some issues, though:
 *
 * - Sometimes we'd issue transaction restarts unnecessarily, when no
 *   deadlock would have actually occurred. Lock ordering restarts had become
 *   our primary cause of transaction restarts, on some workloads accounting
 *   for 20% of actual transaction commits.
 *
 * - To avoid deadlock or livelock, we'd often have to take intent locks
 *   when we only wanted a read lock: with the lock ordering approach, it is
 *   actually illegal to hold _any_ read lock while blocking on an intent
 *   lock, and this had been causing us unnecessary lock contention.
 *
 * - It was getting fragile - the various lock ordering rules are not
 *   trivial, and we'd been seeing occasional livelock issues related to
 *   this machinery.
 *
 * So, since bcachefs is already a relational database masquerading as a
 * filesystem, we're stealing the next traditional database technique and
 * switching to a cycle detector for avoiding deadlocks.
 *
 * When we block taking a btree lock, after adding ourselves to the waitlist
 * but before sleeping, we do a DFS of btree transactions waiting on other
 * btree transactions, starting with the current transaction and walking our
 * held locks, and transactions blocking on our held locks.
 *
 * If we find a cycle, we emit a transaction restart. Occasionally (e.g. the
 * btree split path) we cannot allow the lock() operation to fail, so if
 * necessary we'll tell another transaction that it has to fail.
 *
 * Result: trans_restart_would_deadlock events are reduced by a factor of 10
 * to 100, and we'll be able to delete a whole bunch of grotty, fragile code.
 */
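
/*
 * A minimal sketch of the DFS described above, as commented pseudocode; the
 * helper for_each_trans_holding_wanted_lock() and the depth limit constant
 * are assumptions for illustration, not the actual bcachefs lock graph code.
 * The depth limit is what the trans_restart_would_deadlock_recursion_limit
 * event below corresponds to:
 *
 *	static bool would_deadlock(struct btree_trans *start,
 *				   struct btree_trans *t, unsigned depth)
 *	{
 *		struct btree_trans *holder;
 *
 *		if (depth > MAX_LOCK_GRAPH_DEPTH)
 *			return true;	// too deep: give up and restart
 *
 *		// Follow waits-for edges: t is blocked on locks that
 *		// 'holder' currently holds.
 *		for_each_trans_holding_wanted_lock(t, holder)
 *			if (holder == start ||
 *			    would_deadlock(start, holder, depth + 1))
 *				return true;	// cycle back to 'start'
 *
 *		return false;
 *	}
 */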

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
	),

	TP_fast_assign(
		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);

TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
		__field(unsigned long,		caller_ip)
		__field(unsigned long,		bytes)
	),

	TP_fast_assign(
		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *)__entry->caller_ip,
		  __entry->bytes)
);

TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32)
		__field(unsigned long,		caller_ip)
		__field(enum btree_id,		btree_id)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s)
		__field(u32,			new_u64s)
	),

	TP_fast_assign(
		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s		= old_u64s;
		__entry->new_u64s		= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *)__entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);

#endif /* _TRACE_BCACHEFS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../fs/bcachefs
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>