2014-12-11 02:44:55 +03:00
# include <linux/swap_cgroup.h>
2008-10-23 01:14:58 +04:00
# include <linux/vmalloc.h>
2014-12-11 02:44:55 +03:00
# include <linux/mm.h>
2009-01-08 05:07:58 +03:00
2014-12-11 02:44:55 +03:00
# include <linux/swapops.h> /* depends on mm.h include */
2009-01-08 05:07:58 +03:00
/* Serializes swapon()/swapoff() updates of swap_cgroup_ctrl[]. */
static DEFINE_MUTEX ( swap_cgroup_mutex ) ;
/*
 * Per-swap-type control structure: @map is an array of @length pages,
 * each page holding SC_PER_PAGE struct swap_cgroup entries; @lock guards
 * id updates done by swap_cgroup_record()/swap_cgroup_cmpxchg().
 */
struct swap_cgroup_ctrl {
struct page * * map ;
unsigned long length ;
2010-03-15 07:34:57 +03:00
spinlock_t lock ;
2009-01-08 05:07:58 +03:00
} ;
2011-11-03 00:38:36 +04:00
/* One control structure per possible swapfile. */
static struct swap_cgroup_ctrl swap_cgroup_ctrl [ MAX_SWAPFILES ] ;
2009-01-08 05:07:58 +03:00
/* One entry per swap slot: id of the owning mem_cgroup (0 = none). */
struct swap_cgroup {
2009-04-03 03:57:45 +04:00
unsigned short id ;
2009-01-08 05:07:58 +03:00
} ;
/* Number of swap_cgroup entries that fit in one backing page. */
# define SC_PER_PAGE (PAGE_SIZE / sizeof(struct swap_cgroup))
/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), this is accessed directly from swap.
 *
 * This means,
 *  - we have no race in "exchange" when we're accessed via SwapCache because
 *    SwapCache (and its swp_entry) is under lock.
 *  - When called via swap_free(), there is no user of this entry and no race.
 * Then, we don't need lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */
/*
* allocate buffer for swap_cgroup .
*/
static int swap_cgroup_prepare ( int type )
{
struct page * page ;
struct swap_cgroup_ctrl * ctrl ;
unsigned long idx , max ;
ctrl = & swap_cgroup_ctrl [ type ] ;
for ( idx = 0 ; idx < ctrl - > length ; idx + + ) {
page = alloc_page ( GFP_KERNEL | __GFP_ZERO ) ;
if ( ! page )
goto not_enough_page ;
ctrl - > map [ idx ] = page ;
}
return 0 ;
not_enough_page :
max = idx ;
for ( idx = 0 ; idx < max ; idx + + )
__free_page ( ctrl - > map [ idx ] ) ;
return - ENOMEM ;
}
2012-01-13 05:18:48 +04:00
/*
 * Map a swap entry to its struct swap_cgroup slot.
 * @ent:   swap entry to look up
 * @ctrlp: if non-NULL, receives a pointer to the controlling
 *         swap_cgroup_ctrl for @ent's swap type
 *
 * NOTE(review): assumes the swap type of @ent is currently swapon'ed so
 * that ctrl->map is populated — callers must guarantee this.
 */
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
					struct swap_cgroup_ctrl **ctrlp)
{
	pgoff_t off = swp_offset(ent);
	struct swap_cgroup_ctrl *ctrl = &swap_cgroup_ctrl[swp_type(ent)];
	struct swap_cgroup *base;

	if (ctrlp)
		*ctrlp = ctrl;

	base = page_address(ctrl->map[off / SC_PER_PAGE]);
	return base + off % SC_PER_PAGE;
}
2010-03-11 02:22:17 +03:00
/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
				   unsigned short old, unsigned short new)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc = lookup_swap_cgroup(ent, &ctrl);
	unsigned long flags;
	unsigned short ret = 0;

	/* The compare-and-swap must be atomic w.r.t. other id updates. */
	spin_lock_irqsave(&ctrl->lock, flags);
	if (sc->id == old) {
		sc->id = new;
		ret = old;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return ret;
}
2009-01-08 05:07:58 +03:00
/**
* swap_cgroup_record - record mem_cgroup for this swp_entry .
* @ ent : swap entry to be recorded into
2012-06-20 23:53:01 +04:00
* @ id : mem_cgroup to be recorded
2009-01-08 05:07:58 +03:00
*
2009-04-03 03:57:45 +04:00
* Returns old value at success , 0 at failure .
* ( Of course , old value can be 0. )
2009-01-08 05:07:58 +03:00
*/
2009-04-03 03:57:45 +04:00
unsigned short swap_cgroup_record ( swp_entry_t ent , unsigned short id )
2009-01-08 05:07:58 +03:00
{
struct swap_cgroup_ctrl * ctrl ;
struct swap_cgroup * sc ;
2009-04-03 03:57:45 +04:00
unsigned short old ;
2010-03-15 07:34:57 +03:00
unsigned long flags ;
2009-01-08 05:07:58 +03:00
2012-01-13 05:18:48 +04:00
sc = lookup_swap_cgroup ( ent , & ctrl ) ;
2009-01-08 05:07:58 +03:00
2010-03-15 07:34:57 +03:00
spin_lock_irqsave ( & ctrl - > lock , flags ) ;
old = sc - > id ;
sc - > id = id ;
spin_unlock_irqrestore ( & ctrl - > lock , flags ) ;
2009-01-08 05:07:58 +03:00
return old ;
}
/**
2012-01-13 05:18:48 +04:00
* lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
2009-01-08 05:07:58 +03:00
* @ ent : swap entry to be looked up .
*
2014-01-13 08:23:27 +04:00
* Returns ID of mem_cgroup at success . 0 at failure . ( 0 is invalid ID )
2009-01-08 05:07:58 +03:00
*/
2012-01-13 05:18:48 +04:00
unsigned short lookup_swap_cgroup_id ( swp_entry_t ent )
2009-01-08 05:07:58 +03:00
{
2012-01-13 05:18:48 +04:00
return lookup_swap_cgroup ( ent , NULL ) - > id ;
2009-01-08 05:07:58 +03:00
}
int swap_cgroup_swapon ( int type , unsigned long max_pages )
{
void * array ;
unsigned long array_size ;
unsigned long length ;
struct swap_cgroup_ctrl * ctrl ;
if ( ! do_swap_account )
return 0 ;
2011-05-27 03:25:30 +04:00
length = DIV_ROUND_UP ( max_pages , SC_PER_PAGE ) ;
2009-01-08 05:07:58 +03:00
array_size = length * sizeof ( void * ) ;
2011-05-28 21:36:34 +04:00
array = vzalloc ( array_size ) ;
2009-01-08 05:07:58 +03:00
if ( ! array )
goto nomem ;
ctrl = & swap_cgroup_ctrl [ type ] ;
mutex_lock ( & swap_cgroup_mutex ) ;
ctrl - > length = length ;
ctrl - > map = array ;
2010-03-15 07:34:57 +03:00
spin_lock_init ( & ctrl - > lock ) ;
2009-01-08 05:07:58 +03:00
if ( swap_cgroup_prepare ( type ) ) {
/* memory shortage */
ctrl - > map = NULL ;
ctrl - > length = 0 ;
mutex_unlock ( & swap_cgroup_mutex ) ;
2011-05-27 03:25:31 +04:00
vfree ( array ) ;
2009-01-08 05:07:58 +03:00
goto nomem ;
}
mutex_unlock ( & swap_cgroup_mutex ) ;
return 0 ;
nomem :
printk ( KERN_INFO " couldn't allocate enough memory for swap_cgroup. \n " ) ;
printk ( KERN_INFO
2011-07-26 04:12:12 +04:00
" swap_cgroup can be disabled by swapaccount=0 boot option \n " ) ;
2009-01-08 05:07:58 +03:00
return - ENOMEM ;
}
void swap_cgroup_swapoff ( int type )
{
2011-05-27 03:25:31 +04:00
struct page * * map ;
unsigned long i , length ;
2009-01-08 05:07:58 +03:00
struct swap_cgroup_ctrl * ctrl ;
if ( ! do_swap_account )
return ;
mutex_lock ( & swap_cgroup_mutex ) ;
ctrl = & swap_cgroup_ctrl [ type ] ;
2011-05-27 03:25:31 +04:00
map = ctrl - > map ;
length = ctrl - > length ;
ctrl - > map = NULL ;
ctrl - > length = 0 ;
mutex_unlock ( & swap_cgroup_mutex ) ;
if ( map ) {
for ( i = 0 ; i < length ; i + + ) {
struct page * page = map [ i ] ;
2009-01-08 05:07:58 +03:00
if ( page )
__free_page ( page ) ;
}
2011-05-27 03:25:31 +04:00
vfree ( map ) ;
2009-01-08 05:07:58 +03:00
}
}