#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
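
/*
 * Writeback cutoffs, compared against dc->disk.c->gc_stats.in_use in
 * should_writeback(): above CUTOFF_WRITEBACK only REQ_SYNC writes are still
 * cached in writeback mode, and above CUTOFF_WRITEBACK_SYNC nothing is.
 */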
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70
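
/* Total number of dirty sectors on a bcache device, summed per stripe. */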
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}
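
/* Map a sector offset on the device to the index of its stripe. */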
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}
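
/*
 * Check whether any of the stripes spanned by [offset, offset + nr_sectors)
 * already has dirty sectors.
 */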
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}
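
/*
 * Decide whether a write bio should be cached in writeback mode: refuse if
 * writeback is disabled, the device is detaching or the cache is too full;
 * prefer writeback for writes touching already-dirty stripes when partial
 * stripe writes are expensive, and for REQ_SYNC writes.
 */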
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return bio->bi_rw & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
}
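
/* Wake up the writeback thread for this cached device. */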
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	wake_up_process(dc->writeback_thread);
}
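
/*
 * Called when dirty data is written into the cache: on the 0 -> 1 transition
 * of has_dirty, bump dc->count, mark the backing device's superblock dirty
 * and wake the writeback thread.
 */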
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
void bch_sectors_dirty_init(struct cached_dev *dc);
int bch_cached_dev_writeback_init(struct cached_dev *);

#endif