#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_

#include <linux/cgroup.h>

struct data_insert_op {
	struct closure		cl;
	struct cache_set	*c;
	struct bio		*bio;

	unsigned		inode;
	uint16_t		write_point;
	uint16_t		write_prio;
	short			error;

	unsigned		bypass:1;		/* don't cache the data; invalidate stale keys */
	unsigned		writeback:1;		/* insert the data as dirty */
	unsigned		flush_journal:1;	/* flush the journal after inserting the keys */
	unsigned		csum:1;			/* checksum the data being inserted */

	unsigned		replace:1;		/* only insert if replace_key is unchanged */
	unsigned		replace_collision:1;	/* replace failed: replace_key was modified */

	unsigned		insert_data_done:1;	/* all data has been written to the cache */

	/* Anything past this point won't get zeroed in search_alloc() */
	struct keylist		insert_keys;
	BKEY_PADDED(replace_key);
};

unsigned bch_get_congested(struct cache_set *);
void bch_data_insert(struct closure *cl);
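/*
 * Illustrative usage sketch, not part of the original header: a
 * data_insert_op is driven through its embedded closure by handing cl to
 * bch_data_insert() via closure_call(). The surrounding struct search, its
 * field names, and the parent closure below are assumptions made for the
 * example:
 *
 *	struct data_insert_op *op = &s->iop;
 *
 *	op->c		= d->c;		// cache set to insert into
 *	op->bio		= bio;		// the data being written
 *	op->inode	= d->id;	// backing device's inode number
 *	op->writeback	= true;		// assumption: a writeback-mode write
 *
 *	closure_call(&op->cl, bch_data_insert, NULL, &s->cl);
 */
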
void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);

extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;

struct bch_cgroup {
#ifdef CONFIG_CGROUP_BCACHE
	struct cgroup_subsys_state	css;
#endif
	/*
	 * We subtract one from the index into bch_cache_modes[], so that
	 * default == -1; this makes it so the rest match up with d->cache_mode,
	 * and we use d->cache_mode if cgrp->cache_mode < 0
	 */
	short		cache_mode;
	bool		verify;
	struct cache_stat_collector	stats;
};
struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
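
/*
 * Illustrative sketch, not part of the original header: how the fallback
 * described in the comment above might be resolved by a caller. The helper
 * name, the cached_dev argument, and BDEV_CACHE_MODE() are assumptions:
 *
 *	static int cache_mode(struct cached_dev *dc, struct bio *bio)
 *	{
 *		int r = bch_bio_to_cgroup(bio)->cache_mode;
 *
 *		// negative means "unset": fall back to the device's mode
 *		return r >= 0 ? r : BDEV_CACHE_MODE(&dc->sb);
 *	}
 */
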
#endif /* _BCACHE_REQUEST_H_ */