/*
 *
 * Copyright IBM Corporation, 2012
 * Author: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

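/*
 * Events surfaced through the cgroup v2 "hugetlb.<size>.events" files;
 * HUGETLB_MAX counts the number of times the limit has been hit.
 */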
enum hugetlb_memory_event {
	HUGETLB_MAX,
	HUGETLB_NR_MEMORY_EVENTS,
};

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
};

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

#define hugetlb_cgroup_from_counter(counter, idx)                   \
	container_of(counter, struct hugetlb_cgroup, hugepage[idx])

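/*
 * Example of the cftype->private encoding above: MEMFILE_PRIVATE(1, RES_LIMIT)
 * packs hstate index 1 into the upper 16 bits and the attribute into the
 * lower 16 bits, so MEMFILE_IDX() recovers 1 and MEMFILE_ATTR() recovers
 * RES_LIMIT.
 */
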
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

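/*
 * Returns true while any hstate counter of @h_cg still has pages charged;
 * hugetlb_cgroup_css_offline() reparents pages until this returns false.
 */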
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(&h_cg->hugepage[idx]))
			return true;
	}
	return false;
}

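/*
 * Set up one page counter per hstate, chained to the parent cgroup's
 * counter for hierarchical accounting, with the default limit rounded
 * down to a whole number of huge pages.
 */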
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *counter = &h_cgroup->hugepage[idx];
		struct page_counter *parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup)
			parent = &parent_h_cgroup->hugepage[idx];
		page_counter_init(counter, parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   1 << huge_page_order(&hstates[idx]));
		ret = page_counter_set_max(counter, limit);
		VM_BUG_ON(ret);
	}
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot be moved off the active
 * list or uncharged from the cgroup, so there is no need to take a page
 * reference or to test whether the page is active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that belong to no cgroup,
	 * i.e. hugepages with fewer than 3 pages. We can safely ignore
	 * those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx = 0;

	do {
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

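/*
 * Record a memory event: bump the cgroup-local counter once, then walk up
 * the hierarchy bumping the hierarchical counter and notifying the
 * corresponding events file at each level, up to but excluding the root.
 */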
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}

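/*
 * Charge nr_pages to the hugetlb cgroup of the current task. On success,
 * *ptr is set to the charged cgroup (NULL when accounting is disabled or
 * the hstate is below HUGETLB_CGROUP_MIN_ORDER); on failure, -ENOMEM is
 * returned and a HUGETLB_MAX event is recorded for the cgroup that hit
 * its limit.
 */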
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages,
				     &counter)) {
		ret = -ENOMEM;
		hugetlb_event(hugetlb_cgroup_from_counter(counter, idx), idx,
			      HUGETLB_MAX);
	}
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
	return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
};

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	default:
		BUG();
	}
}

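/*
 * cgroup v2 read side: usage is reported in bytes, and a limit equal to
 * the rounded-down PAGE_COUNTER_MAX default is printed as "max".
 */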
static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

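/*
 * Common write handler for the limit files. @max names the "unlimited"
 * token accepted by page_counter_memparse(): "max" on cgroup v2 and "-1"
 * on the legacy hierarchy.
 */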
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

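/* Format a huge page size in binary units, e.g. "2MB" or "1GB". */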
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}

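/*
 * Register the cgroup v1 (legacy) control files for one hstate:
 * hugetlb.<size>.limit_in_bytes, .usage_in_bytes, .max_usage_in_bytes
 * and .failcnt.
 */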
static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}

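/*
 * The per-hstate control files are added at boot by
 * hugetlb_cgroup_file_init(); this empty array merely terminates the
 * subsystem's cftype lists.
 */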
static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc = hugetlb_cgroup_css_alloc,
	.css_offline = hugetlb_cgroup_css_offline,
	.css_free = hugetlb_cgroup_css_free,
	.dfl_cftypes = hugetlb_files,
	.legacy_cftypes = hugetlb_files,
};