2013-03-24 03:11:31 +04:00
/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
# include "bcache.h"
# include "btree.h"
# include "debug.h"
2013-12-18 11:47:33 +04:00
# include "extents.h"
2013-03-24 03:11:31 +04:00
# include <linux/console.h>
# include <linux/debugfs.h>
# include <linux/module.h>
# include <linux/random.h>
# include <linux/seq_file.h>
static struct dentry * debug ;
2013-10-25 03:36:03 +04:00
# ifdef CONFIG_BCACHE_DEBUG
2013-03-24 03:11:31 +04:00
2013-12-18 10:49:08 +04:00
/*
 * Walk every bset that was actually written to disk for btree node @b,
 * starting from the on-disk copy @start.  Iteration stops at the end of
 * the node (KEY_SIZE(&b->key) sectors) or at the first bset whose seq no
 * longer matches @start's, i.e. the first block that was never written.
 */
#define for_each_written_bset(b, start, i)				\
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
		 block_bytes(b->c))
2013-12-18 10:49:08 +04:00
void bch_btree_verify ( struct btree * b )
2013-03-24 03:11:31 +04:00
{
struct btree * v = b - > c - > verify_data ;
2013-12-18 10:49:08 +04:00
struct bset * ondisk , * sorted , * inmemory ;
struct bio * bio ;
2013-03-24 03:11:31 +04:00
2013-12-18 10:49:08 +04:00
if ( ! b - > c - > verify | | ! b - > c - > verify_ondisk )
2013-03-24 03:11:31 +04:00
return ;
2013-12-17 03:27:25 +04:00
down ( & b - > io_mutex ) ;
2013-03-24 03:11:31 +04:00
mutex_lock ( & b - > c - > verify_lock ) ;
2013-12-18 10:49:08 +04:00
ondisk = b - > c - > verify_ondisk ;
2013-12-21 05:28:16 +04:00
sorted = b - > c - > verify_data - > keys . set - > data ;
inmemory = b - > keys . set - > data ;
2013-12-18 10:49:08 +04:00
2013-03-24 03:11:31 +04:00
bkey_copy ( & v - > key , & b - > key ) ;
v - > written = 0 ;
v - > level = b - > level ;
2013-12-21 05:28:16 +04:00
v - > keys . ops = b - > keys . ops ;
2013-03-24 03:11:31 +04:00
2013-12-18 10:49:08 +04:00
bio = bch_bbio_alloc ( b - > c ) ;
bio - > bi_bdev = PTR_CACHE ( b - > c , & b - > key , 0 ) - > bdev ;
bio - > bi_iter . bi_sector = PTR_OFFSET ( & b - > key , 0 ) ;
bio - > bi_iter . bi_size = KEY_SIZE ( & v - > key ) < < 9 ;
2016-06-05 22:32:05 +03:00
bio_set_op_attrs ( bio , REQ_OP_READ , REQ_META | READ_SYNC ) ;
2013-12-18 10:49:08 +04:00
bch_bio_map ( bio , sorted ) ;
2013-03-24 03:11:31 +04:00
2016-06-05 22:31:41 +03:00
submit_bio_wait ( bio ) ;
2013-12-18 10:49:08 +04:00
bch_bbio_free ( bio , b - > c ) ;
memcpy ( ondisk , sorted , KEY_SIZE ( & v - > key ) < < 9 ) ;
bch_btree_node_read_done ( v ) ;
2013-12-21 05:28:16 +04:00
sorted = v - > keys . set - > data ;
2013-12-18 10:49:08 +04:00
if ( inmemory - > keys ! = sorted - > keys | |
memcmp ( inmemory - > start ,
sorted - > start ,
2013-12-18 09:56:21 +04:00
( void * ) bset_bkey_last ( inmemory ) - ( void * ) inmemory - > start ) ) {
2013-12-18 10:49:08 +04:00
struct bset * i ;
unsigned j ;
2013-03-24 03:11:31 +04:00
console_lock ( ) ;
2013-12-18 10:49:08 +04:00
printk ( KERN_ERR " *** in memory: \n " ) ;
2013-12-18 11:47:33 +04:00
bch_dump_bset ( & b - > keys , inmemory , 0 ) ;
2013-03-24 03:11:31 +04:00
2013-12-18 10:49:08 +04:00
printk ( KERN_ERR " *** read back in: \n " ) ;
2013-12-18 11:47:33 +04:00
bch_dump_bset ( & v - > keys , sorted , 0 ) ;
2013-03-24 03:11:31 +04:00
2013-12-18 10:49:08 +04:00
for_each_written_bset ( b , ondisk , i ) {
unsigned block = ( ( void * ) i - ( void * ) ondisk ) /
block_bytes ( b - > c ) ;
2013-03-24 03:11:31 +04:00
2013-12-18 10:49:08 +04:00
printk ( KERN_ERR " *** on disk block %u: \n " , block ) ;
2013-12-18 11:47:33 +04:00
bch_dump_bset ( & b - > keys , i , block ) ;
2013-12-18 10:49:08 +04:00
}
printk ( KERN_ERR " *** block %zu not written \n " ,
( ( void * ) i - ( void * ) ondisk ) / block_bytes ( b - > c ) ) ;
for ( j = 0 ; j < inmemory - > keys ; j + + )
if ( inmemory - > d [ j ] ! = sorted - > d [ j ] )
2013-03-24 03:11:31 +04:00
break ;
2013-12-18 10:49:08 +04:00
printk ( KERN_ERR " b->written %u \n " , b - > written ) ;
2013-03-24 03:11:31 +04:00
console_unlock ( ) ;
panic ( " verify failed at %u \n " , j ) ;
}
mutex_unlock ( & b - > c - > verify_lock ) ;
2013-12-17 03:27:25 +04:00
up ( & b - > io_mutex ) ;
2013-03-24 03:11:31 +04:00
}
2013-09-11 06:02:45 +04:00
void bch_data_verify ( struct cached_dev * dc , struct bio * bio )
2013-03-24 03:11:31 +04:00
{
char name [ BDEVNAME_SIZE ] ;
struct bio * check ;
2016-09-22 10:10:01 +03:00
struct bio_vec bv ;
2013-11-24 05:19:00 +04:00
struct bvec_iter iter ;
2013-03-24 03:11:31 +04:00
2013-09-11 06:02:45 +04:00
check = bio_clone ( bio , GFP_NOIO ) ;
2013-03-24 03:11:31 +04:00
if ( ! check )
return ;
2016-06-05 22:32:05 +03:00
bio_set_op_attrs ( check , REQ_OP_READ , READ_SYNC ) ;
2013-03-24 03:11:31 +04:00
2013-06-07 05:15:57 +04:00
if ( bio_alloc_pages ( check , GFP_NOIO ) )
2013-03-24 03:11:31 +04:00
goto out_put ;
2016-06-05 22:31:41 +03:00
submit_bio_wait ( check ) ;
2013-03-24 03:11:31 +04:00
2013-11-24 05:19:00 +04:00
bio_for_each_segment ( bv , bio , iter ) {
void * p1 = kmap_atomic ( bv . bv_page ) ;
void * p2 = page_address ( check - > bi_io_vec [ iter . bi_idx ] . bv_page ) ;
2013-03-24 03:11:31 +04:00
2013-11-24 05:19:00 +04:00
cache_set_err_on ( memcmp ( p1 + bv . bv_offset ,
p2 + bv . bv_offset ,
bv . bv_len ) ,
2013-09-11 01:27:42 +04:00
dc - > disk . c ,
" verify failed at dev %s sector %llu " ,
bdevname ( dc - > bdev , name ) ,
2013-10-12 02:44:27 +04:00
( uint64_t ) bio - > bi_iter . bi_sector ) ;
2013-09-11 01:27:42 +04:00
2013-09-11 06:02:45 +04:00
kunmap_atomic ( p1 ) ;
2013-03-24 03:11:31 +04:00
}
2016-09-22 10:10:01 +03:00
bio_free_pages ( check ) ;
2013-03-24 03:11:31 +04:00
out_put :
bio_put ( check ) ;
}
# endif
# ifdef CONFIG_DEBUG_FS
/* XXX: cache set refcounting */
/* Per-open state for the debugfs btree dump file. */
struct dump_iterator {
	char			buf[PAGE_SIZE];	/* formatted text not yet copied out */
	size_t			bytes;		/* valid bytes remaining in buf */
	struct cache_set	*c;
	struct keybuf		keys;		/* scan cursor over the btree */
};
/* Keybuf predicate: accept every key, so the dump walks the whole btree. */
static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
	return true;
}
/*
 * debugfs read: stream a textual dump of the cache set's extents to
 * userspace.  Drains any leftover text in i->buf first, then rescans for
 * the next key and formats it.  Returns the number of bytes copied, or
 * -EFAULT if the userspace buffer is unwritable.
 *
 * Note: copy_to_user() returns the count of uncopied bytes, not an errno,
 * so its result must be translated to -EFAULT rather than returned raw.
 */
static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;
	char kbuf[80];

	while (size) {
		struct keybuf_key *w;
		unsigned bytes = min(i->bytes, size);

		if (copy_to_user(buf, i->buf, bytes))
			return -EFAULT;

		ret	 += bytes;
		buf	 += bytes;
		size	 -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);

		/* Userspace buffer filled before ours drained; stop here. */
		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
		if (!w)
			break;

		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}
static int bch_dump_open ( struct inode * inode , struct file * file )
{
struct cache_set * c = inode - > i_private ;
struct dump_iterator * i ;
i = kzalloc ( sizeof ( struct dump_iterator ) , GFP_KERNEL ) ;
if ( ! i )
return - ENOMEM ;
file - > private_data = i ;
i - > c = c ;
2013-06-05 17:24:39 +04:00
bch_keybuf_init ( & i - > keys ) ;
2013-03-24 03:11:31 +04:00
i - > keys . last_scanned = KEY ( 0 , 0 , 0 ) ;
return 0 ;
}
static int bch_dump_release ( struct inode * inode , struct file * file )
{
kfree ( file - > private_data ) ;
return 0 ;
}
/* File operations for the per-cache-set debugfs dump file. */
static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};
void bch_debug_init_cache_set ( struct cache_set * c )
{
if ( ! IS_ERR_OR_NULL ( debug ) ) {
char name [ 50 ] ;
snprintf ( name , 50 , " bcache-%pU " , c - > sb . set_uuid ) ;
c - > debug = debugfs_create_file ( name , 0400 , debug , c ,
& cache_set_debug_ops ) ;
}
}
# endif
/* Tear down the bcache debugfs hierarchy, if it was ever created. */
void bch_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(debug))
		debugfs_remove_recursive(debug);
}
/*
 * Create the "bcache" debugfs root directory.  Always returns 0: a missing
 * or failed debugfs is non-fatal, and later users check IS_ERR_OR_NULL(debug).
 */
int __init bch_debug_init(struct kobject *kobj)
{
	debug = debugfs_create_dir("bcache", NULL);
	return 0;
}