/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *debug;
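
/*
 * Returns a human-readable status for key @k: an empty string if the key and
 * its pointers look valid, otherwise a short description of what is wrong
 * (bad offset, stale pointer, zeroed key, ...).
 */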
const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i)) {
                        struct cache *ca = PTR_CACHE(c, k, i);
                        size_t bucket = PTR_BUCKET_NR(c, k, i);
                        size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

                        if (KEY_SIZE(k) + r > c->sb.bucket_size)
                                return "bad, length too big";
                        if (bucket < ca->sb.first_bucket)
                                return "bad, short offset";
                        if (bucket >= ca->sb.nbuckets)
                                return "bad, offset past end of device";
                        if (ptr_stale(c, k, i))
                                return "stale";
                }

        if (!bkey_cmp(k, &ZERO_KEY))
                return "bad, null key";
        if (!KEY_PTRS(k))
                return "bad, no pointers";
        if (!KEY_SIZE(k))
                return "zeroed key";
        return "";
}
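
/*
 * Formats @k as "inode:offset len size -> [dev:offset gen ...]" into @buf,
 * returning the number of characters written (not counting the NUL).
 */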
int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
{
        unsigned i = 0;
        char *out = buf, *end = buf + size;

#define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))

        p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));

        if (KEY_PTRS(k))
                while (1) {
                        p("%llu:%llu gen %llu",
                          PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));

                        if (++i == KEY_PTRS(k))
                                break;

                        p(", ");
                }

        p("]");

        if (KEY_DIRTY(k))
                p(" dirty");
        if (KEY_CSUM(k))
                p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
        return out - buf;
}
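
/* Formats @b's bucket number and level (vs. the root's level) into @buf. */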
int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
{
        return scnprintf(buf, size, "%zu level %i/%i",
                         PTR_BUCKET_NR(b->c, &b->key, 0),
                         b->level, b->c->root ? b->c->root->level : -1);
}

#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)

static bool skipped_backwards(struct btree *b, struct bkey *k)
{
        return bkey_cmp(k, (!b->level)
                        ? &START_KEY(bkey_next(k))
                        : bkey_next(k)) > 0;
}
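
/* Prints every key in bset @i, flagging any key that sorts backwards. */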
static void dump_bset(struct btree *b, struct bset *i)
{
        struct bkey *k;
        unsigned j;
        char buf[80];

        for (k = i->start; k < end(i); k = bkey_next(k)) {
                bch_bkey_to_text(buf, sizeof(buf), k);
                printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
                       (uint64_t *) k - i->d, i->keys, buf);

                for (j = 0; j < KEY_PTRS(k); j++) {
                        size_t n = PTR_BUCKET_NR(b->c, k, j);
                        printk(" bucket %zu", n);

                        if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
                                printk(" prio %i",
                                       PTR_BUCKET(b->c, k, j)->prio);
                }

                printk(" %s\n", bch_ptr_status(b->c, k));

                if (bkey_next(k) < end(i) &&
                    skipped_backwards(b, k))
                        printk(KERN_ERR "Key skipped backwards\n");
        }
}

#endif

#ifdef CONFIG_BCACHE_DEBUG
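
/*
 * Rereads the btree node from disk into c->verify_data and compares it
 * against the freshly sorted in-memory contents in @new, dumping all three
 * views and panicking on a mismatch.
 */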
void bch_btree_verify(struct btree *b, struct bset *new)
{
        struct btree *v = b->c->verify_data;
        struct closure cl;
        closure_init_stack(&cl);

        if (!b->c->verify)
                return;

        closure_wait_event(&b->io.wait, &cl,
                           atomic_read(&b->io.cl.remaining) == -1);

        mutex_lock(&b->c->verify_lock);

        bkey_copy(&v->key, &b->key);
        v->written = 0;
        v->level = b->level;

        bch_btree_node_read(v);
        closure_wait_event(&v->io.wait, &cl,
                           atomic_read(&b->io.cl.remaining) == -1);

        if (new->keys != v->sets[0].data->keys ||
            memcmp(new->start,
                   v->sets[0].data->start,
                   (void *) end(new) - (void *) new->start)) {
                unsigned i, j;

                console_lock();

                printk(KERN_ERR "*** original memory node:\n");
                for (i = 0; i <= b->nsets; i++)
                        dump_bset(b, b->sets[i].data);

                printk(KERN_ERR "*** sorted memory node:\n");
                dump_bset(b, new);

                printk(KERN_ERR "*** on disk node:\n");
                dump_bset(v, v->sets[0].data);

                for (j = 0; j < new->keys; j++)
                        if (new->d[j] != v->sets[0].data->d[j])
                                break;

                console_unlock();
                panic("verify failed at %u\n", j);
        }

        mutex_unlock(&b->c->verify_lock);
}
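
/*
 * Clones @bio, rereads the same range, and compares the result against
 * @bio's pages segment by segment, logging the sector of any mismatch.
 */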
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
        char name[BDEVNAME_SIZE];
        struct bio *check;
        struct bio_vec *bv;
        int i;

        check = bio_clone(bio, GFP_NOIO);
        if (!check)
                return;

        if (bio_alloc_pages(check, GFP_NOIO))
                goto out_put;

        submit_bio_wait(READ_SYNC, check);

        bio_for_each_segment(bv, bio, i) {
                void *p1 = kmap_atomic(bv->bv_page);
                void *p2 = page_address(check->bi_io_vec[i].bv_page);

                if (memcmp(p1 + bv->bv_offset,
                           p2 + bv->bv_offset,
                           bv->bv_len))
                        printk(KERN_ERR
                               "bcache (%s): verify failed at sector %llu\n",
                               bdevname(dc->bdev, name),
                               (uint64_t) bio->bi_sector);

                kunmap_atomic(p1);
        }

        bio_for_each_segment_all(bv, check, i)
                __free_page(bv->bv_page);
out_put:
        bio_put(check);
}

#endif

#ifdef CONFIG_BCACHE_EDEBUG
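
/* Returns the sum of KEY_SIZE() over all keys in @b (leaf nodes only). */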
unsigned bch_count_data(struct btree *b)
{
        unsigned ret = 0;
        struct btree_iter iter;
        struct bkey *k;

        if (!b->level)
                for_each_key(b, k, &iter)
                        ret += KEY_SIZE(k);
        return ret;
}

static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
                                   va_list args)
{
        unsigned i;
        char buf[80];

        console_lock();

        for (i = 0; i <= b->nsets; i++)
                dump_bset(b, b->sets[i].data);

        vprintk(fmt, args);

        console_unlock();

        bch_btree_to_text(buf, sizeof(buf), b);
        panic("at %s\n", buf);
}

void bch_check_key_order_msg(struct btree *b, struct bset *i,
                             const char *fmt, ...)
{
        struct bkey *k;

        if (!i->keys)
                return;

        for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
                if (skipped_backwards(b, k)) {
                        va_list args;
                        va_start(args, fmt);

                        vdump_bucket_and_panic(b, fmt, args);
                        va_end(args);
                }
}
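
/*
 * Checks that the keys in leaf node @b are sorted and non-overlapping;
 * dumps the node and panics if they are not.
 */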
void bch_check_keys(struct btree *b, const char *fmt, ...)
{
        va_list args;
        struct bkey *k, *p = NULL;
        struct btree_iter iter;

        if (b->level)
                return;

        for_each_key(b, k, &iter) {
                if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
                        printk(KERN_ERR "Keys out of order:\n");
                        goto bug;
                }

                if (bch_ptr_invalid(b, k))
                        continue;

                if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
                        printk(KERN_ERR "Overlapping keys:\n");
                        goto bug;
                }
                p = k;
        }
        return;
bug:
        va_start(args, fmt);
        vdump_bucket_and_panic(b, fmt, args);
        va_end(args);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: cache set refcounting */

struct dump_iterator {
        char buf[PAGE_SIZE];
        size_t bytes;
        struct cache_set *c;
        struct keybuf keys;
};

static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
        return true;
}
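
/*
 * debugfs read: walks the cache set's keys with a keybuf and emits one
 * formatted key per line.
 */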
static ssize_t bch_dump_read(struct file *file, char __user *buf,
                             size_t size, loff_t *ppos)
{
        struct dump_iterator *i = file->private_data;
        ssize_t ret = 0;
        char kbuf[80];

        while (size) {
                struct keybuf_key *w;
                unsigned bytes = min(i->bytes, size);

                int err = copy_to_user(buf, i->buf, bytes);
                if (err)
                        return err;

                ret += bytes;
                buf += bytes;
                size -= bytes;
                i->bytes -= bytes;
                memmove(i->buf, i->buf + bytes, i->bytes);

                if (i->bytes)
                        break;

                w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
                if (!w)
                        break;

                bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
                i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
                bch_keybuf_del(&i->keys, w);
        }

        return ret;
}

static int bch_dump_open(struct inode *inode, struct file *file)
{
        struct cache_set *c = inode->i_private;
        struct dump_iterator *i;

        i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
        if (!i)
                return -ENOMEM;

        file->private_data = i;
        i->c = c;
        bch_keybuf_init(&i->keys);
        i->keys.last_scanned = KEY(0, 0, 0);

        return 0;
}

static int bch_dump_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static const struct file_operations cache_set_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch_dump_open,
        .read           = bch_dump_read,
        .release        = bch_dump_release
};
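
/* Creates the per-cache-set debugfs file, named after the set's UUID. */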
void bch_debug_init_cache_set(struct cache_set *c)
{
        if (!IS_ERR_OR_NULL(debug)) {
                char name[50];
                snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);

                c->debug = debugfs_create_file(name, 0400, debug, c,
                                               &cache_set_debug_ops);
        }
}

#endif

void bch_debug_exit(void)
{
        if (!IS_ERR_OR_NULL(debug))
                debugfs_remove_recursive(debug);
}

int __init bch_debug_init(struct kobject *kobj)
{
        int ret = 0;

        debug = debugfs_create_dir("bcache", NULL);

        return ret;
}