/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
/*********************************
* statistics
**********************************/

/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */
/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/
/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
module_param_named(enabled, zswap_enabled, bool, 0644);

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
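/*
 * Illustrative usage, not part of the driver itself: with the 0644
 * permissions above, these tunables appear under
 * /sys/module/zswap/parameters/ and can be changed at runtime, e.g.:
 *
 *	echo 1 > /sys/module/zswap/parameters/enabled
 *	echo lz4 > /sys/module/zswap/parameters/compressor
 *	echo 25 > /sys/module/zswap/parameters/max_pool_percent
 *
 * (lz4 is just an example; a compressor must be available to the crypto
 * API for the write to succeed.)
 */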
/*********************************
* data structures
**********************************/
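/*
 * struct zswap_pool - pairs one zpool (compressed storage backend) with
 * one crypto compression transform per cpu.  Pools live on the
 * RCU-protected zswap_pools list; the kref keeps a pool alive while
 * stored entries still reference it, and the notifier block manages the
 * per-cpu transforms across cpu hotplug.
 */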
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct rcu_head rcu_head;
	struct notifier_block notifier;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};
/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry.  This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	unsigned long handle;
};

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);

/* used by param callback function */
static bool zswap_init_started;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
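/*
 * Worked example (illustrative): with 4GB of RAM (one million 4K pages)
 * and the default max_pool_percent of 20, zswap_is_full() reports full
 * once the compressed pool's backing storage exceeds
 * 1M * 20 / 100 = 200K pages, i.e. about 800MB.
 */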
static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}
/*********************************
* zswap entry functions
**********************************/

static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;

	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}
/*********************************
* rbtree functions
**********************************/
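/*
 * Note (added for clarity): each swap type has its own red-black tree,
 * keyed by swap offset.  The lookups, insertions and removals below all
 * expect the caller to hold the corresponding tree lock.
 */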
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}
/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}
static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}
/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}
/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
* remove the entry from the tree and free it, if no one still
* references it
*/
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}
/*********************************
* per-cpu code
**********************************/
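/*
 * Per-cpu scratch buffer for compression output.  It is sized at two
 * pages, which we take to be headroom for compressor output that
 * expands beyond PAGE_SIZE on incompressible data (an inference from
 * the allocation size below, not a guarantee documented here).
 */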
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
{
	u8 *dst;

	switch (action) {
	case CPU_UP_PREPARE:
		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
		if (!dst) {
			pr_err("can't allocate compressor buffer\n");
			return NOTIFY_BAD;
		}
		per_cpu(zswap_dstmem, cpu) = dst;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		dst = per_cpu(zswap_dstmem, cpu);
		kfree(dst);
		per_cpu(zswap_dstmem, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
				     unsigned long action, void *pcpu)
{
	return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
}

static struct notifier_block zswap_dstmem_notifier = {
	.notifier_call = zswap_cpu_dstmem_notifier,
};

static int __init zswap_cpu_dstmem_init(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}

static void zswap_cpu_dstmem_destroy(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
}
static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
				     unsigned long action, unsigned long cpu)
{
	struct crypto_comp *tfm;

	switch (action) {
	case CPU_UP_PREPARE:
		if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
			break;
		tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
		if (IS_ERR_OR_NULL(tfm)) {
			pr_err("could not alloc crypto comp %s : %ld\n",
			       pool->tfm_name, PTR_ERR(tfm));
			return NOTIFY_BAD;
		}
		*per_cpu_ptr(pool->tfm, cpu) = tfm;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		tfm = *per_cpu_ptr(pool->tfm, cpu);
		if (!IS_ERR_OR_NULL(tfm))
			crypto_free_comp(tfm);
		*per_cpu_ptr(pool->tfm, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int zswap_cpu_comp_notifier(struct notifier_block *nb,
				   unsigned long action, void *pcpu)
{
	unsigned long cpu = (unsigned long)pcpu;
	struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);

	return __zswap_cpu_comp_notifier(pool, action, cpu);
}

static int zswap_cpu_comp_init(struct zswap_pool *pool)
{
	unsigned long cpu;

	memset(&pool->notifier, 0, sizeof(pool->notifier));
	pool->notifier.notifier_call = zswap_cpu_comp_notifier;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}

static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
}
/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ON(!pool);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!pool || !zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}
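/*
 * New pools are added at the head of zswap_pools, so the last pool on
 * the list is the oldest; zswap_shrink() picks it so that reclaim
 * drains old pools (kept alive only by their remaining entries) before
 * the current one.
 */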
static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	if (!WARN_ON(!last) && !zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}
static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		pr_err("pool alloc failed\n");
		return NULL;
	}

	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	if (zswap_cpu_comp_init(pool))
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}
static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
		if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
			pr_err("default compressor %s not available\n",
			       zswap_compressor);
			return NULL;
		}
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
	}
	if (!zpool_has_pool(zswap_zpool_type)) {
		if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
			pr_err("default zpool %s not available\n",
			       zswap_zpool_type);
			return NULL;
		}
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
	}

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}
static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	zswap_cpu_comp_destroy(pool);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct rcu_head *head)
{
	struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head);

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);
	call_rcu(&pool->rcu_head, __zswap_pool_release);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}
/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
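/*
 * Summary (added for clarity): switch the current pool to one matching
 * the new parameter value.  A matching pool already on the list is
 * reused; otherwise a new one is created.  On success the pool is moved
 * to the head of zswap_pools (making it current) and the old current
 * pool's reference is dropped; the old pool then lingers on the list
 * until its remaining entries are freed or written back.
 */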
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg))
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_comp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		list_del_rcu(&pool->list);
	} else {
		spin_unlock(&zswap_pools_lock);
		pool = zswap_pool_create(type, compressor);
		spin_lock(&zswap_pools_lock);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}
/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};
/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);

	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		page_cache_release(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	page_cache_release(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	* There are two possible situations for entry here:
	* (1) refcount is 1 (normal case), entry is valid and on the tree
	* (2) refcount is 0, entry is freed and not on the tree
	*     because invalidate happened during writeback;
	*     search the tree and free the entry if it is found
	*/
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	* if we get here due to ZSWAP_SWAPCACHE_EXIST,
	* a load may be happening concurrently;
	* it is safe and okay to not free the entry,
	* and if we do free the entry in the following put
	* it is also okay to return !0
	*/
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}
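/*
 * Reclaim path (added for clarity): ask the oldest pool's zpool backend
 * to evict one stored object; the backend in turn calls back into
 * zswap_writeback_entry() through zswap_zpool_ops.
 */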
static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}
/*********************************
* frontswap hooks
**********************************/

/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(entry->pool->zpool, len,
			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
			   &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
/*
 * returns 0 if the page was successfully decompressed
 * returns -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}
/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}
/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}
static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}
static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};
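/*
 * Illustrative usage, not part of the driver itself: since zswap hooks
 * frontswap and is initialized at boot, it is typically configured from
 * the kernel command line using the module parameters defined above,
 * e.g.:
 *
 *	zswap.enabled=1 zswap.compressor=lzo zswap.zpool=zbud
 */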
/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", S_IRUGO,
			zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif
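/*
 * With CONFIG_DEBUG_FS enabled, the counters above are readable under
 * /sys/kernel/debug/zswap/ (assuming debugfs is mounted at the usual
 * /sys/kernel/debug), e.g.:
 *
 *	cat /sys/kernel/debug/zswap/stored_pages
 */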
/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	if (zswap_cpu_dstmem_init()) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	pool = __zswap_pool_create_fallback();
	if (!pool) {
		pr_err("pool creation failed\n");
		goto pool_fail;
	}
	pr_info("loaded using pool %s/%s\n", pool->tfm_name,
		zpool_get_type(pool->zpool));

	list_add(&pool->list, &zswap_pools);

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

pool_fail:
	zswap_cpu_dstmem_destroy();
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");