#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)

#ifndef CONFIG_BLK_CGROUP
/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
extern struct cgroup_subsys blkio_subsys;
#define blkio_subsys_id blkio_subsys.subsys_id
#endif

enum stat_type {
	/* Total time spent (in ns) between request dispatch to the driver
	 * and request completion for IOs done by this cgroup. This may not
	 * be accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME = 0,
	/* Total bytes transferred */
	BLKIO_STAT_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_SERVICED,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,
	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
	BLKIO_STAT_SECTORS,
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};

enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_unaccounted_time,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};

/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};

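/*
 * Illustrative note (not part of the original header): each pair of
 * (enum blkio_policy_id, file enum above) identifies one cgroup control
 * file, e.g. (BLKIO_POLICY_PROP, BLKIO_PROP_weight) backs "blkio.weight"
 * and (BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device) backs
 * "blkio.throttle.read_bps_device". A minimal sketch of how a read
 * handler might dispatch on the pair:
 *
 *	switch (plid) {
 *	case BLKIO_POLICY_PROP:
 *		if (fileid == BLKIO_PROP_weight)
 *			return blkcg->weight;
 *		break;
 *	case BLKIO_POLICY_THROTL:
 *		break;
 *	}
 */
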
struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;
	struct list_head policy_list; /* list of blkio_policy_node */
};

struct blkio_group_stats {
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	uint64_t sectors;
	/* Time not charged to this cgroup */
	uint64_t unaccounted_time;
	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Sum of number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for average */
	uint64_t avg_queue_size_samples;
	/* How many times this group has been removed from service tree */
	unsigned long dequeue;
	/* Total time spent waiting for it to be assigned a timeslice. */
	uint64_t group_wait_time;
	uint64_t start_group_wait_time;
	/* Time spent idling for this blkio_group */
	uint64_t idle_time;
	uint64_t start_idle_time;
	/*
	 * Total time when we have requests queued and do not contain the
	 * current active queue.
	 */
	uint64_t empty_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};

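/*
 * Illustrative sketch (not part of the original header): stat_arr holds the
 * per-direction counters for the first BLKIO_STAT_QUEUED + 1 stat types,
 * indexed first by enum stat_type and then by enum stat_sub_type. A
 * hypothetical helper reading the total bytes serviced by a group could
 * look like:
 *
 *	static uint64_t blkg_total_bytes(struct blkio_group_stats *stats)
 *	{
 *		return stats->stat_arr[BLKIO_STAT_SERVICE_BYTES]
 *				      [BLKIO_STAT_TOTAL];
 *	}
 */
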
struct blkio_group {
	/* An rcu protected unique identifier for the group */
	void *key;
	struct hlist_node blkcg_node;
	unsigned short blkcg_id;
	/* Store cgroup path */
	char path[128];
	/* The device MKDEV(major, minor), this group has been created for */
	dev_t dev;
	/* policy which owns this blk group */
	enum blkio_policy_id plid;
	/* Need to serialize the stats in the case of reset/update */
	spinlock_t stats_lock;
	struct blkio_group_stats stats;
};

struct blkio_policy_node {
	struct list_head node;
	dev_t dev;
	/* This node belongs to max bw policy or proportional weight policy */
	enum blkio_policy_id plid;
	/* cgroup file to which this rule belongs */
	int fileid;
	union {
		unsigned int weight;
		/*
		 * Rate read/write in terms of bytes per second.
		 * Whether this rate represents a read or a write is
		 * determined by the file type "fileid".
		 */
		u64 bps;
		unsigned int iops;
	} val;
};

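/*
 * Illustrative sketch (not part of the original header): which member of
 * "val" is live follows from plid and fileid. A hypothetical rule capping
 * reads on device 8:16 at 1 MB/s would be filled in roughly as:
 *
 *	struct blkio_policy_node pn = {
 *		.dev	= MKDEV(8, 16),
 *		.plid	= BLKIO_POLICY_THROTL,
 *		.fileid	= BLKIO_THROTL_read_bps_device,
 *		.val	= { .bps = 1024 * 1024 },
 *	};
 */
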
extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
				   dev_t dev);
extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
				    dev_t dev);
extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
					dev_t dev);
extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
					 dev_t dev);

typedef void (blkio_unlink_group_fn)(void *key, struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn)(void *key,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(void *key,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(void *key,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(void *key,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(void *key,
			struct blkio_group *blkg, unsigned int write_iops);

struct blkio_policy_ops {
	blkio_unlink_group_fn *blkio_unlink_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
};

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

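/*
 * Illustrative sketch (not part of the original header): a policy such as
 * a proportional-weight scheduler registers itself by filling in a
 * blkio_policy_type with its callbacks and plid. The callback names below
 * are hypothetical placeholders:
 *
 *	static struct blkio_policy_type my_policy = {
 *		.ops = {
 *			.blkio_unlink_group_fn		= my_unlink_group,
 *			.blkio_update_group_weight_fn	= my_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&my_policy);
 */
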
#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }

static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
void blkiocg_set_start_empty_time(struct blkio_group *blkg);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS

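/*
 * Illustrative note (not part of the original header): each
 * BLKG_FLAG_FNS(name) invocation above expands to three helpers,
 * blkio_mark_blkg_<name>(), blkio_clear_blkg_<name>() and
 * blkio_blkg_<name>(), which set, clear and test the BLKG_<name> bit in
 * stats->flags. For example, marking a group as waiting and later testing
 * for it:
 *
 *	blkio_mark_blkg_waiting(&blkg->stats);
 *	if (blkio_blkg_waiting(&blkg->stats))
 *		blkio_clear_blkg_waiting(&blkg->stats);
 */
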
#else
static inline void blkiocg_update_avg_queue_size_stats(
						struct blkio_group *blkg) {}
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
						unsigned long dequeue) {}
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{}
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
#endif

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
	struct blkio_group *blkg, void *key, dev_t dev,
	enum blkio_policy_id plid);
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
						void *key);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
					unsigned long time,
					unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
					bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
					bool direction, bool sync);

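/*
 * Illustrative sketch (not part of the original header): a typical flow for
 * an IO scheduler is to resolve the current task's blkio_cgroup, look up
 * (or create and add) its per-device group, then feed the accounting hooks
 * as requests move through the queue. Roughly:
 *
 *	struct blkio_cgroup *blkcg = task_blkio_cgroup(current);
 *	struct blkio_group *blkg = blkiocg_lookup_group(blkcg, key);
 *
 *	if (!blkg)
 *		blkiocg_add_blkio_group(blkcg, new_blkg, key, dev,
 *					BLKIO_POLICY_PROP);
 *	blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
 *	blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
 *	blkiocg_update_completion_stats(blkg, start_time, io_start_time,
 *					direction, sync);
 */
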
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
task_blkio_cgroup(struct task_struct *tsk) { return NULL; }

static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid) {}

static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }

static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
						unsigned long time,
						unsigned long unaccounted_time)
{}
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync) {}
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
		uint64_t start_time, uint64_t io_start_time, bool direction,
		bool sync) {}
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
						bool direction, bool sync) {}
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync) {}
#endif
#endif /* _BLK_CGROUP_H */