/*
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */
# include <linux/cgroup.h>
2014-12-11 02:42:34 +03:00
# include <linux/page_counter.h>
2012-08-01 03:42:12 +04:00
# include <linux/slab.h>
# include <linux/hugetlb.h>
# include <linux/hugetlb_cgroup.h>
struct hugetlb_cgroup {
struct cgroup_subsys_state css ;
/*
* the counter to account for hugepages from hugetlb .
*/
2014-12-11 02:42:34 +03:00
struct page_counter hugepage [ HUGE_MAX_HSTATE ] ;
2012-08-01 03:42:12 +04:00
} ;
2012-08-01 03:42:24 +04:00
# define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
# define MEMFILE_IDX(val) (((val) >> 16) & 0xffff)
# define MEMFILE_ATTR(val) ((val) & 0xffff)
2012-08-01 03:42:12 +04:00
static struct hugetlb_cgroup * root_h_cgroup __read_mostly ;
static inline
struct hugetlb_cgroup * hugetlb_cgroup_from_css ( struct cgroup_subsys_state * s )
{
2013-08-09 04:11:23 +04:00
return s ? container_of ( s , struct hugetlb_cgroup , css ) : NULL ;
2012-08-01 03:42:12 +04:00
}
static inline
struct hugetlb_cgroup * hugetlb_cgroup_from_task ( struct task_struct * task )
{
2014-02-08 19:36:58 +04:00
return hugetlb_cgroup_from_css ( task_css ( task , hugetlb_cgrp_id ) ) ;
2012-08-01 03:42:12 +04:00
}
/* True iff @h_cg is the controller's root cgroup. */
static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return h_cg == root_h_cgroup;
}
2013-08-09 04:11:22 +04:00
static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup ( struct hugetlb_cgroup * h_cg )
2012-08-01 03:42:12 +04:00
{
2014-05-16 21:22:48 +04:00
return hugetlb_cgroup_from_css ( h_cg - > css . parent ) ;
2012-08-01 03:42:12 +04:00
}
2013-08-09 04:11:22 +04:00
static inline bool hugetlb_cgroup_have_usage ( struct hugetlb_cgroup * h_cg )
2012-08-01 03:42:12 +04:00
{
int idx ;
for ( idx = 0 ; idx < hugetlb_max_hstate ; idx + + ) {
2014-12-11 02:42:34 +03:00
if ( page_counter_read ( & h_cg - > hugepage [ idx ] ) )
2012-08-01 03:42:12 +04:00
return true ;
}
return false ;
}
2016-05-21 02:57:50 +03:00
static void hugetlb_cgroup_init ( struct hugetlb_cgroup * h_cgroup ,
struct hugetlb_cgroup * parent_h_cgroup )
{
int idx ;
for ( idx = 0 ; idx < HUGE_MAX_HSTATE ; idx + + ) {
struct page_counter * counter = & h_cgroup - > hugepage [ idx ] ;
struct page_counter * parent = NULL ;
unsigned long limit ;
int ret ;
if ( parent_h_cgroup )
parent = & parent_h_cgroup - > hugepage [ idx ] ;
page_counter_init ( counter , parent ) ;
limit = round_down ( PAGE_COUNTER_MAX ,
1 < < huge_page_order ( & hstates [ idx ] ) ) ;
ret = page_counter_limit ( counter , limit ) ;
VM_BUG_ON ( ret ) ;
}
}
2013-08-09 04:11:23 +04:00
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc ( struct cgroup_subsys_state * parent_css )
2012-08-01 03:42:12 +04:00
{
2013-08-09 04:11:23 +04:00
struct hugetlb_cgroup * parent_h_cgroup = hugetlb_cgroup_from_css ( parent_css ) ;
struct hugetlb_cgroup * h_cgroup ;
2012-08-01 03:42:12 +04:00
h_cgroup = kzalloc ( sizeof ( * h_cgroup ) , GFP_KERNEL ) ;
if ( ! h_cgroup )
return ERR_PTR ( - ENOMEM ) ;
2016-05-21 02:57:50 +03:00
if ( ! parent_h_cgroup )
2012-08-01 03:42:12 +04:00
root_h_cgroup = h_cgroup ;
2016-05-21 02:57:50 +03:00
hugetlb_cgroup_init ( h_cgroup , parent_h_cgroup ) ;
2012-08-01 03:42:12 +04:00
return & h_cgroup - > css ;
}
/* Free the memory backing a hugetlb cgroup's css. */
static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	kfree(hugetlb_cgroup_from_css(css));
}
2012-08-01 03:42:21 +04:00
/*
* Should be called with hugetlb_lock held .
* Since we are holding hugetlb_lock , pages cannot get moved from
* active list or uncharged from the cgroup , So no need to get
* page reference and test for page active here . This function
* cannot fail .
*/
2013-08-09 04:11:22 +04:00
static void hugetlb_cgroup_move_parent ( int idx , struct hugetlb_cgroup * h_cg ,
2012-08-01 03:42:21 +04:00
struct page * page )
{
2014-12-11 02:42:34 +03:00
unsigned int nr_pages ;
struct page_counter * counter ;
2012-08-01 03:42:21 +04:00
struct hugetlb_cgroup * page_hcg ;
2013-08-09 04:11:22 +04:00
struct hugetlb_cgroup * parent = parent_hugetlb_cgroup ( h_cg ) ;
2012-08-01 03:42:21 +04:00
page_hcg = hugetlb_cgroup_from_page ( page ) ;
/*
* We can have pages in active list without any cgroup
* ie , hugepage with less than 3 pages . We can safely
* ignore those pages .
*/
if ( ! page_hcg | | page_hcg ! = h_cg )
goto out ;
2014-12-11 02:42:34 +03:00
nr_pages = 1 < < compound_order ( page ) ;
2012-08-01 03:42:21 +04:00
if ( ! parent ) {
parent = root_h_cgroup ;
/* root has no limit */
2014-12-11 02:42:34 +03:00
page_counter_charge ( & parent - > hugepage [ idx ] , nr_pages ) ;
2012-08-01 03:42:21 +04:00
}
counter = & h_cg - > hugepage [ idx ] ;
2014-12-11 02:42:34 +03:00
/* Take the pages off the local counter */
page_counter_cancel ( counter , nr_pages ) ;
2012-08-01 03:42:21 +04:00
set_hugetlb_cgroup ( page , parent ) ;
out :
return ;
}
/*
* Force the hugetlb cgroup to empty the hugetlb resources by moving them to
* the parent cgroup .
*/
2013-08-09 04:11:23 +04:00
static void hugetlb_cgroup_css_offline ( struct cgroup_subsys_state * css )
2012-08-01 03:42:12 +04:00
{
2013-08-09 04:11:23 +04:00
struct hugetlb_cgroup * h_cg = hugetlb_cgroup_from_css ( css ) ;
2012-08-01 03:42:21 +04:00
struct hstate * h ;
struct page * page ;
2012-10-26 15:37:33 +04:00
int idx = 0 ;
2012-08-01 03:42:21 +04:00
do {
for_each_hstate ( h ) {
spin_lock ( & hugetlb_lock ) ;
list_for_each_entry ( page , & h - > hugepage_activelist , lru )
2013-08-09 04:11:22 +04:00
hugetlb_cgroup_move_parent ( idx , h_cg , page ) ;
2012-08-01 03:42:21 +04:00
spin_unlock ( & hugetlb_lock ) ;
idx + + ;
}
cond_resched ( ) ;
2013-08-09 04:11:22 +04:00
} while ( hugetlb_cgroup_have_usage ( h_cg ) ) ;
2012-08-01 03:42:12 +04:00
}
2012-08-01 03:42:18 +04:00
int hugetlb_cgroup_charge_cgroup ( int idx , unsigned long nr_pages ,
struct hugetlb_cgroup * * ptr )
{
int ret = 0 ;
2014-12-11 02:42:34 +03:00
struct page_counter * counter ;
2012-08-01 03:42:18 +04:00
struct hugetlb_cgroup * h_cg = NULL ;
if ( hugetlb_cgroup_disabled ( ) )
goto done ;
/*
* We don ' t charge any cgroup if the compound page have less
* than 3 pages .
*/
if ( huge_page_order ( & hstates [ idx ] ) < HUGETLB_CGROUP_MIN_ORDER )
goto done ;
again :
rcu_read_lock ( ) ;
h_cg = hugetlb_cgroup_from_task ( current ) ;
2014-05-13 20:11:01 +04:00
if ( ! css_tryget_online ( & h_cg - > css ) ) {
2012-08-01 03:42:18 +04:00
rcu_read_unlock ( ) ;
goto again ;
}
rcu_read_unlock ( ) ;
2015-11-06 05:50:26 +03:00
if ( ! page_counter_try_charge ( & h_cg - > hugepage [ idx ] , nr_pages , & counter ) )
ret = - ENOMEM ;
2012-08-01 03:42:18 +04:00
css_put ( & h_cg - > css ) ;
done :
* ptr = h_cg ;
return ret ;
}
/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	/* record the owning cgroup on the page itself */
	set_hugetlb_cgroup(page, h_cg);
}
/*
* Should be called with hugetlb_lock held
*/
void hugetlb_cgroup_uncharge_page ( int idx , unsigned long nr_pages ,
struct page * page )
{
struct hugetlb_cgroup * h_cg ;
if ( hugetlb_cgroup_disabled ( ) )
return ;
2014-08-30 02:18:42 +04:00
lockdep_assert_held ( & hugetlb_lock ) ;
2012-08-01 03:42:18 +04:00
h_cg = hugetlb_cgroup_from_page ( page ) ;
if ( unlikely ( ! h_cg ) )
return ;
set_hugetlb_cgroup ( page , NULL ) ;
2014-12-11 02:42:34 +03:00
page_counter_uncharge ( & h_cg - > hugepage [ idx ] , nr_pages ) ;
2012-08-01 03:42:18 +04:00
return ;
}
void hugetlb_cgroup_uncharge_cgroup ( int idx , unsigned long nr_pages ,
struct hugetlb_cgroup * h_cg )
{
if ( hugetlb_cgroup_disabled ( ) | | ! h_cg )
return ;
if ( huge_page_order ( & hstates [ idx ] ) < HUGETLB_CGROUP_MIN_ORDER )
return ;
2014-12-11 02:42:34 +03:00
page_counter_uncharge ( & h_cg - > hugepage [ idx ] , nr_pages ) ;
2012-08-01 03:42:18 +04:00
return ;
}
2014-12-11 02:42:34 +03:00
/* Resource attributes encoded into cftype->private via MEMFILE_PRIVATE(). */
enum {
	RES_USAGE,	/* current usage, in pages */
	RES_LIMIT,	/* configured limit */
	RES_MAX_USAGE,	/* usage high-watermark */
	RES_FAILCNT,	/* number of failed charge attempts */
};
2013-12-05 21:28:03 +04:00
static u64 hugetlb_cgroup_read_u64 ( struct cgroup_subsys_state * css ,
struct cftype * cft )
2012-08-01 03:42:24 +04:00
{
2014-12-11 02:42:34 +03:00
struct page_counter * counter ;
2013-08-09 04:11:24 +04:00
struct hugetlb_cgroup * h_cg = hugetlb_cgroup_from_css ( css ) ;
2012-08-01 03:42:24 +04:00
2014-12-11 02:42:34 +03:00
counter = & h_cg - > hugepage [ MEMFILE_IDX ( cft - > private ) ] ;
2012-08-01 03:42:24 +04:00
2014-12-11 02:42:34 +03:00
switch ( MEMFILE_ATTR ( cft - > private ) ) {
case RES_USAGE :
return ( u64 ) page_counter_read ( counter ) * PAGE_SIZE ;
case RES_LIMIT :
return ( u64 ) counter - > limit * PAGE_SIZE ;
case RES_MAX_USAGE :
return ( u64 ) counter - > watermark * PAGE_SIZE ;
case RES_FAILCNT :
return counter - > failcnt ;
default :
BUG ( ) ;
}
2012-08-01 03:42:24 +04:00
}
2014-12-11 02:42:34 +03:00
/* Serializes limit updates performed by hugetlb_cgroup_write(). */
static DEFINE_MUTEX(hugetlb_limit_mutex);
2014-05-13 20:16:21 +04:00
/*
 * Write handler for the per-hstate limit_in_bytes file.  Parses the user
 * buffer ("-1" meaning unlimited) and applies the new limit, rounded down
 * to a whole number of huge pages.  The root cgroup has no limit.
 */
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int idx, ret;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	ret = page_counter_memparse(strstrip(buf), "-1", &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
2014-05-13 20:16:21 +04:00
/*
 * Write handler for the max_usage_in_bytes and failcnt files: any write
 * resets the corresponding statistic to zero.
 */
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	struct page_counter *counter;
	int ret = 0;

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
/*
 * Format @hsize (bytes) into @buf as a human-readable size using the
 * largest fitting unit: GB, MB, or KB (KB is the fallback, so values
 * below 1KB print as "0KB").  Returns @buf for convenient chaining.
 */
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	static const struct {
		unsigned int shift;
		const char *fmt;
	} units[] = {
		{ 30, "%luGB" },
		{ 20, "%luMB" },
		{ 10, "%luKB" },
	};
	unsigned int i;

	/* pick the first unit that fits; the last entry always matches */
	for (i = 0; i < 2 && hsize < (1UL << units[i].shift); i++)
		;
	snprintf(buf, size, units[i].fmt, hsize >> units[i].shift);
	return buf;
}
2012-12-19 02:23:19 +04:00
static void __init __hugetlb_cgroup_file_init ( int idx )
2012-08-01 03:42:24 +04:00
{
char buf [ 32 ] ;
struct cftype * cft ;
struct hstate * h = & hstates [ idx ] ;
/* format the size */
mem_fmt ( buf , 32 , huge_page_size ( h ) ) ;
/* Add the limit file */
cft = & h - > cgroup_files [ 0 ] ;
snprintf ( cft - > name , MAX_CFTYPE_NAME , " %s.limit_in_bytes " , buf ) ;
cft - > private = MEMFILE_PRIVATE ( idx , RES_LIMIT ) ;
2013-12-05 21:28:03 +04:00
cft - > read_u64 = hugetlb_cgroup_read_u64 ;
2014-05-13 20:16:21 +04:00
cft - > write = hugetlb_cgroup_write ;
2012-08-01 03:42:24 +04:00
/* Add the usage file */
cft = & h - > cgroup_files [ 1 ] ;
snprintf ( cft - > name , MAX_CFTYPE_NAME , " %s.usage_in_bytes " , buf ) ;
cft - > private = MEMFILE_PRIVATE ( idx , RES_USAGE ) ;
2013-12-05 21:28:03 +04:00
cft - > read_u64 = hugetlb_cgroup_read_u64 ;
2012-08-01 03:42:24 +04:00
/* Add the MAX usage file */
cft = & h - > cgroup_files [ 2 ] ;
snprintf ( cft - > name , MAX_CFTYPE_NAME , " %s.max_usage_in_bytes " , buf ) ;
cft - > private = MEMFILE_PRIVATE ( idx , RES_MAX_USAGE ) ;
2014-05-13 20:16:21 +04:00
cft - > write = hugetlb_cgroup_reset ;
2013-12-05 21:28:03 +04:00
cft - > read_u64 = hugetlb_cgroup_read_u64 ;
2012-08-01 03:42:24 +04:00
/* Add the failcntfile */
cft = & h - > cgroup_files [ 3 ] ;
snprintf ( cft - > name , MAX_CFTYPE_NAME , " %s.failcnt " , buf ) ;
cft - > private = MEMFILE_PRIVATE ( idx , RES_FAILCNT ) ;
2014-05-13 20:16:21 +04:00
cft - > write = hugetlb_cgroup_reset ;
2013-12-05 21:28:03 +04:00
cft - > read_u64 = hugetlb_cgroup_read_u64 ;
2012-08-01 03:42:24 +04:00
/* NULL terminate the last cft */
cft = & h - > cgroup_files [ 4 ] ;
memset ( cft , 0 , sizeof ( * cft ) ) ;
2014-07-15 19:05:09 +04:00
WARN_ON ( cgroup_add_legacy_cftypes ( & hugetlb_cgrp_subsys ,
h - > cgroup_files ) ) ;
2012-12-19 02:23:19 +04:00
}
/* Register control files for every hstate large enough to be tracked. */
void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) < HUGETLB_CGROUP_MIN_ORDER)
			continue;
		__hugetlb_cgroup_file_init(hstate_index(h));
	}
}
2012-08-01 03:42:36 +04:00
/*
* hugetlb_lock will make sure a parallel cgroup rmdir won ' t happen
* when we migrate hugepages
*/
2012-08-01 03:42:27 +04:00
void hugetlb_cgroup_migrate ( struct page * oldhpage , struct page * newhpage )
{
struct hugetlb_cgroup * h_cg ;
2012-08-01 03:42:35 +04:00
struct hstate * h = page_hstate ( oldhpage ) ;
2012-08-01 03:42:27 +04:00
if ( hugetlb_cgroup_disabled ( ) )
return ;
2014-01-24 03:52:54 +04:00
VM_BUG_ON_PAGE ( ! PageHuge ( oldhpage ) , oldhpage ) ;
2012-08-01 03:42:27 +04:00
spin_lock ( & hugetlb_lock ) ;
h_cg = hugetlb_cgroup_from_page ( oldhpage ) ;
set_hugetlb_cgroup ( oldhpage , NULL ) ;
/* move the h_cg details to new cgroup */
set_hugetlb_cgroup ( newhpage , h_cg ) ;
2012-08-01 03:42:35 +04:00
list_move ( & newhpage - > lru , & h - > hugepage_activelist ) ;
2012-08-01 03:42:27 +04:00
spin_unlock ( & hugetlb_lock ) ;
return ;
}
2014-02-08 19:36:58 +04:00
struct cgroup_subsys hugetlb_cgrp_subsys = {
2012-11-19 20:13:38 +04:00
. css_alloc = hugetlb_cgroup_css_alloc ,
. css_offline = hugetlb_cgroup_css_offline ,
. css_free = hugetlb_cgroup_css_free ,
2012-08-01 03:42:12 +04:00
} ;