/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

static const struct crypto_type crypto_scomp_type;
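
/*
 * Per-CPU scratch buffers used to linearise the source and destination
 * scatterlists before they are handed to the synchronous scomp
 * implementation. scomp_scratch_users counts the transforms sharing the
 * buffers; allocation and freeing are serialised by scomp_lock.
 */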
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rscomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}

static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches) {
			crypto_scomp_free_scratches(scomp_src_scratches);
			scomp_src_scratches = NULL;
			return -ENOMEM;
		}
	}
	return 0;
}

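/*
 * Scratch buffers are allocated when the first scomp transform is
 * initialised and freed again from crypto_exit_scomp_ops_async() once
 * the last user is gone.
 */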
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_scomp_alloc_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

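/*
 * Service an acomp request with the synchronous scomp algorithm: the
 * source scatterlist is copied into the per-CPU source scratch buffer,
 * the scomp (de)compression runs on the linear buffers, and the result
 * is copied back into the destination scatterlist, which is allocated
 * here if the caller did not provide one.
 */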
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				/* report the allocation failure instead of returning 0 */
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
}

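/*
 * Wrap a synchronous scomp algorithm behind the acomp interface so that
 * users of the asynchronous API can drive compression implementations
 * that only provide the synchronous scomp calls.
 */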
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

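/*
 * Allocate the scomp per-request context for an acomp request backed by
 * a synchronous algorithm. The request itself is freed and NULL is
 * returned if the context allocation fails.
 */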
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

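/*
 * Registration helpers: force the algorithm type to scomp before handing
 * the base algorithm to the generic crypto API.
 */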
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");