/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev)
			return pn;
	}
	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
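
/*
 * For example, a synchronous write bumps both stat[BLKIO_STAT_WRITE] and
 * stat[BLKIO_STAT_SYNC]: every request is counted exactly once in the
 * READ/WRITE pair and once in the SYNC/ASYNC pair.
 */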

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This avoids cases where superfluous timeslice complete
	 * events (e.g., forced_dispatch in CFQ) arrive while no IOs are
	 * being served, which could otherwise trigger the empty check
	 * incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
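
/*
 * As the stat names suggest, start_time marks when the request started
 * waiting and io_start_time when it was dispatched to the device, so the
 * two deltas below account wait time (io_start_time - start_time) and
 * service time (now - io_start_time) respectively.
 */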
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
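
/*
 * The token pasting above generates blkiocg_weight_read(), which backs the
 * "weight" control file declared in blkio_files[] below.
 */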

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
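		/*
		 * A per-device weight rule (policy node) overrides the
		 * cgroup-wide weight, so such groups are left untouched.
		 */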
		pn = blkio_policy_search_node(blkcg, blkg->dev);
		if (pn)
			continue;

		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
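
/*
 * The resulting keys look like "8:16 Read" or, with diskname_only, just
 * "8:16" (illustrative device numbers; the major:minor pair comes from the
 * blkio_group's dev field).
 */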

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
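
/*
 * Each instantiation above generates a blkiocg_<name>_read() map reader,
 * e.g. blkiocg_io_serviced_read(), hooked up as the .read_map handler of
 * the matching entry in blkio_files[]. show_total controls whether a
 * cgroup-wide "Total" row is appended after the per-device rows.
 */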

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent input of too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	ret = strict_strtoul(s[1], 10, &temp);
	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
	    temp > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	newpn->weight = temp;

	return 0;
}
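
/*
 * The accepted input is "<major>:<minor> <weight>", e.g. "8:16 500" (an
 * illustrative device). A weight of 0 passes the range check above and is
 * interpreted by the caller as a request to delete the per-device rule.
 */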

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	int keep_newpn = 0;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev);
	if (!pn) {
		if (newpn->weight != 0) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (newpn->weight == 0) {
		/* weight == 0 means deleting a specific weight */
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	pn->weight = newpn->weight;

update_io_group:
	/* update weight for each cfqg */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (newpn->dev == blkg->dev) {
			list_for_each_entry(blkiop, &blkio_list, list)
				blkiop->ops.blkio_update_group_weight_fn(blkg,
						newpn->weight ?
						newpn->weight :
						blkcg->weight);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	struct blkio_policy_node *pn;

	seq_printf(m, "dev\tweight\n");

	blkcg = cgroup_to_blkio_cgroup(cgrp);
	if (list_empty(&blkcg->policy_list))
		goto out;

	spin_lock_irq(&blkcg->lock);
	list_for_each_entry(pn, &blkcg->policy_list, node) {
		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
			   MINOR(pn->dev), pn->weight);
	}
	spin_unlock_irq(&blkcg->lock);
out:
	return 0;
}
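
/*
 * Reading blkio.weight_device therefore produces something like
 * (illustrative values):
 *
 *	dev	weight
 *	8:16	500
 */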

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkiocg_weight_device_read,
		.write_string = blkiocg_weight_device_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
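
/*
 * The files registered here show up in each cgroup directory with the
 * subsystem-name prefix, e.g. blkio.weight and blkio.weight_device.
 */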

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one io controlling policy.
	 * Once we have more policies in place, we need some dynamic
	 * registration of callback function.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;

done:
	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two level (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
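
/*
 * cgroup_changed is only set here; it is presumably consumed by the I/O
 * scheduler (e.g. CFQ) on the task's next I/O so that cached queue
 * associations can be dropped and rebuilt against the new group.
 */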

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
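
/*
 * A minimal registration sketch (names illustrative, not from this file):
 * an I/O scheduler fills a struct blkio_policy_type with its callbacks and
 * registers it at init time, e.g.
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_blkio_group,
 *			.blkio_update_group_weight_fn = foo_update_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);
 *
 * and calls blkio_policy_unregister() on exit.
 */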

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");