#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
2009-12-03 20:59:42 +03:00
2010-09-16 01:06:33 +04:00
/* Identifiers for the registered blkio policies; also index blkg->pd[]. */
enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,		/* number of policies, keep last */
};
2010-10-01 23:16:41 +04:00
/* Max limits for throttle policy */
# define THROTL_IOPS_MAX UINT_MAX
2012-03-06 01:14:54 +04:00
# ifdef CONFIG_BLK_CGROUP
2009-12-03 23:06:43 +03:00
2010-04-09 10:31:19 +04:00
/*
 * Statistics tracked per blkio group.  Entries up to and including
 * BLKIO_STAT_QUEUED are kept per direction/sync subtype (see
 * enum stat_sub_type); the rest are single valued.
 */
enum stat_type {
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/*
	 * Total time spent (in ns) between request dispatch to the driver
	 * and request completion for IOs done by this cgroup.  This may not
	 * be accurate when NCQ is turned on.
	 */
	BLKIO_STAT_SERVICE_TIME,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,

	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};
2012-03-08 22:53:59 +04:00
/* Types lower than this live in stat_arr and have subtypes */
# define BLKIO_STAT_ARR_NR (BLKIO_STAT_QUEUED + 1)
2011-05-19 23:38:28 +04:00
/* Per cpu stats */
enum stat_type_cpu {
BLKIO_STAT_CPU_SECTORS ,
/* Total bytes transferred */
BLKIO_STAT_CPU_SERVICE_BYTES ,
/* Total IOs serviced, post merge */
BLKIO_STAT_CPU_SERVICED ,
BLKIO_STAT_CPU_NR
} ;
2010-04-09 10:31:19 +04:00
/* Subtype index for stats kept per direction and sync-ness. */
enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL		/* keep last, used as array size */
};
2010-04-09 08:15:35 +04:00
/* blkg state flags */
enum blkg_state_flags {
BLKG_waiting = 0 ,
BLKG_idling ,
BLKG_empty ,
} ;
2010-09-16 01:06:33 +04:00
/*
 * cgroup files owned by proportional weight policy.  Starts at 1 so that
 * 0 never matches a valid file id.
 */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_unaccounted_time,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};
2010-09-16 01:06:34 +04:00
/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};
2009-12-03 20:59:42 +03:00
/* Per-cgroup state of the block IO controller. */
struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;		/* proportional weight of this cgroup */
	/* NOTE(review): presumably protects blkg_list — verify in blk-cgroup.c */
	spinlock_t lock;
	struct hlist_head blkg_list;	/* all blkgs belonging to this cgroup */
};
2010-04-02 02:01:24 +04:00
/*
 * Statistics maintained per blkg per policy.  64bit counters are read
 * under @syncp.  Field order matters: the BLKG_STATS_DEBUG_CLEAR_*
 * offsetof() macros depend on it.
 */
struct blkio_group_stats {
	struct u64_stats_sync syncp;
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	/* stats with READ/WRITE/SYNC/ASYNC subtypes, types < BLKIO_STAT_ARR_NR */
	uint64_t stat_arr[BLKIO_STAT_ARR_NR][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	uint64_t unaccounted_time;

	/* Sum of number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for average */
	uint64_t avg_queue_size_samples;
	/* How many times this group has been removed from service tree */
	unsigned long dequeue;
	/* Total time spent waiting for it to be assigned a timeslice. */
	uint64_t group_wait_time;
	/* Time spent idling for this blkio_group */
	uint64_t idle_time;
	/*
	 * Total time when we have requests queued and do not contain the
	 * current active queue.
	 */
	uint64_t empty_time;

	/* fields after this shouldn't be cleared on stat reset */
	uint64_t start_group_wait_time;
	uint64_t start_idle_time;
	uint64_t start_empty_time;
	/* BLKG_* state flags, manipulated via the BLKG_FLAG_FNS helpers */
	uint16_t flags;
#endif
};
2012-03-08 22:53:58 +04:00
# ifdef CONFIG_DEBUG_BLK_CGROUP
# define BLKG_STATS_DEBUG_CLEAR_START \
offsetof ( struct blkio_group_stats , unaccounted_time )
# define BLKG_STATS_DEBUG_CLEAR_SIZE \
( offsetof ( struct blkio_group_stats , start_group_wait_time ) - \
BLKG_STATS_DEBUG_CLEAR_START )
# endif
2011-05-19 23:38:28 +04:00
/* Per cpu blkio group stats; @syncp guards 64bit reads of the counters */
struct blkio_group_stats_cpu {
	uint64_t sectors;
	uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
	struct u64_stats_sync syncp;
};
2012-03-06 01:15:07 +04:00
/* Per-blkg per-policy configuration values. */
struct blkio_group_conf {
	unsigned int weight;
	/* NOTE(review): [2] presumably indexed by READ/WRITE direction — verify */
	unsigned int iops[2];
	u64 bps[2];
};
2012-03-06 01:15:14 +04:00
/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group *blkg;

	/* Configuration */
	struct blkio_group_conf conf;

	struct blkio_group_stats stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu __percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};
2009-12-03 20:59:42 +03:00
/* Association between a blkio cgroup and a request queue. */
struct blkio_group {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;	/* membership in q's blkg list */
	struct hlist_node blkcg_node;	/* membership in blkcg->blkg_list */
	struct blkio_cgroup *blkcg;
	/* Store cgroup path */
	char path[128];
	/* reference count; manipulated via blkg_get()/blkg_put() */
	int refcnt;

	/* per-policy data, indexed by blkio_policy_id */
	struct blkg_policy_data *pd[BLKIO_NR_POLICIES];

	/* List of blkg waiting for per cpu stats memory to be allocated */
	struct list_head alloc_node;
	struct rcu_head rcu_head;	/* for deferred freeing under RCU */
};
2012-03-06 01:15:14 +04:00
/*
 * Policy callback signatures.  blkio_init_group_fn initializes a newly
 * created blkg; the update_* callbacks notify the policy that the
 * corresponding configuration value changed for @blkg on @q.
 */
typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops);
2009-12-04 18:36:42 +03:00
/* Callback table a policy fills in before registering; NULL entries allowed */
struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};
/* Descriptor a policy passes to blkio_policy_register(). */
struct blkio_policy_type {
	struct list_head list;		/* membership in the global policy list */
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
};
2012-03-06 01:15:12 +04:00
/* Queue init/drain/teardown hooks called by the block layer */
extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);

extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);
2009-12-04 18:36:42 +03:00
2012-03-06 01:15:14 +04:00
/**
* blkg_to_pdata - get policy private data
* @ blkg : blkg of interest
* @ pol : policy of interest
*
* Return pointer to private data associated with the @ blkg - @ pol pair .
*/
static inline void * blkg_to_pdata ( struct blkio_group * blkg ,
struct blkio_policy_type * pol )
{
2012-03-06 01:15:16 +04:00
return blkg ? blkg - > pd [ pol - > plid ] - > pdata : NULL ;
2012-03-06 01:15:14 +04:00
}
/**
* pdata_to_blkg - get blkg associated with policy private data
* @ pdata : policy private data of interest
* @ pol : policy @ pdata is for
*
* @ pdata is policy private data for @ pol . Determine the blkg it ' s
* associated with .
*/
static inline struct blkio_group * pdata_to_blkg ( void * pdata ,
struct blkio_policy_type * pol )
{
if ( pdata ) {
struct blkg_policy_data * pd =
container_of ( pdata , struct blkg_policy_data , pdata ) ;
return pd - > blkg ;
}
return NULL ;
}
2010-04-26 21:27:56 +04:00
/* Return the cgroup path stored in @blkg->path. */
static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}
2012-03-06 01:15:15 +04:00
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	/* taking a ref on a zero-ref blkg is a bug: it may already be freed */
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}
/* Release function invoked when the last blkg reference is dropped */
void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	/* last reference: hand off to __blkg_release() for teardown */
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
2009-12-03 23:06:43 +03:00
#else	/* CONFIG_BLK_CGROUP */

/* Empty placeholder types and no-op stubs when the controller is disabled */
struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
2012-03-06 01:15:19 +04:00
/* No-op stub; fixes the "destory_root" parameter-name typo */
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root) { }
2012-03-06 01:15:20 +04:00
static inline void update_root_blkg_pd(struct request_queue *q,
				       enum blkio_policy_id plid) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata,
				struct blkio_policy_type *pol) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif	/* CONFIG_BLK_CGROUP */
2011-03-08 21:45:00 +03:00
/* Valid range and default for the proportional weight of a cgroup */
#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500
2009-12-03 20:59:48 +03:00
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* Debug-only stat update helpers */
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

/*
 * Generate mark/clear/test helpers for each BLKG_* state flag bit in
 * blkio_group_stats->flags.
 */
#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
2009-12-03 20:59:48 +03:00
#else	/* CONFIG_DEBUG_BLK_CGROUP */
/* No-op stubs when debug stats are disabled */
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
2012-03-06 01:14:54 +04:00
#ifdef CONFIG_BLK_CGROUP
/* Lookup/creation and stat update interfaces implemented by the controller */
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
2009-12-03 20:59:42 +03:00
#else	/* CONFIG_BLK_CGROUP */
struct cgroup;
/* Stub lookups returning NULL when the controller is disabled */
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
bio_blkio_cgroup(struct bio *bio) { return NULL; }
2012-03-06 01:15:06 +04:00
/* Stub; second parameter made consistent with the real declaration above */
static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
					      struct request_queue *q)
{ return NULL; }
2010-04-02 02:01:24 +04:00
/* No-op stat update stubs */
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long time,
			unsigned long unaccounted_time) { }
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync) { }
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t start_time,
			uint64_t io_start_time, bool direction, bool sync) { }
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol,
			struct blkio_group *curr_blkg, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */