// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
# include <linux/errno.h>
# include <linux/kernel.h>
# include <linux/module.h>
# include <linux/seq_file.h>
# include <linux/slab.h>
# include <linux/string.h>
# include <linux/crypto.h>
2016-12-31 18:56:23 +03:00
# include <linux/compiler.h>
2016-10-21 15:19:48 +03:00
# include <linux/vmalloc.h>
# include <crypto/algapi.h>
# include <linux/cryptouser.h>
# include <net/netlink.h>
# include <linux/scatterlist.h>
# include <crypto/scatterwalk.h>
# include <crypto/internal/acompress.h>
# include <crypto/internal/scompress.h>
# include "internal.h"
2019-03-29 16:09:56 +03:00
/*
 * Per-CPU pre-allocated scratch buffers used to linearize scatterlist
 * input/output around the synchronous compression calls.
 */
struct scomp_scratch {
	spinlock_t lock;	/* serializes use of src/dst on this CPU */
	void *src;		/* SCOMP_SCRATCH_SIZE input buffer */
	void *dst;		/* SCOMP_SCRATCH_SIZE output buffer */
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};
2016-10-21 15:19:48 +03:00
static const struct crypto_type crypto_scomp_type;
/* Number of live scomp tfms; scratch buffers exist while this is non-zero. */
static int scomp_scratch_users;
/* Protects scomp_scratch_users and scratch allocation/freeing. */
static DEFINE_MUTEX(scomp_lock);
#ifdef CONFIG_NET
/* Report algorithm details over the crypto netlink (cryptouser) interface. */
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

/* /proc/crypto entry: identify the algorithm type as "scomp". */
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}
2019-03-29 16:09:56 +03:00
static void crypto_scomp_free_scratches ( void )
2016-10-21 15:19:48 +03:00
{
2019-03-29 16:09:56 +03:00
struct scomp_scratch * scratch ;
2016-10-21 15:19:48 +03:00
int i ;
2019-03-29 16:09:56 +03:00
for_each_possible_cpu ( i ) {
2019-04-12 18:14:15 +03:00
scratch = per_cpu_ptr ( & scomp_scratch , i ) ;
2016-10-21 15:19:48 +03:00
2019-03-29 16:09:56 +03:00
vfree ( scratch - > src ) ;
vfree ( scratch - > dst ) ;
scratch - > src = NULL ;
scratch - > dst = NULL ;
}
2016-10-21 15:19:48 +03:00
}
2019-03-29 16:09:56 +03:00
static int crypto_scomp_alloc_scratches ( void )
2016-10-21 15:19:48 +03:00
{
2019-03-29 16:09:56 +03:00
struct scomp_scratch * scratch ;
2016-10-21 15:19:48 +03:00
int i ;
for_each_possible_cpu ( i ) {
2019-03-29 16:09:56 +03:00
void * mem ;
2016-10-21 15:19:48 +03:00
2019-04-12 18:14:15 +03:00
scratch = per_cpu_ptr ( & scomp_scratch , i ) ;
2016-10-21 15:19:48 +03:00
2019-03-29 16:09:56 +03:00
mem = vmalloc_node ( SCOMP_SCRATCH_SIZE , cpu_to_node ( i ) ) ;
if ( ! mem )
goto error ;
scratch - > src = mem ;
mem = vmalloc_node ( SCOMP_SCRATCH_SIZE , cpu_to_node ( i ) ) ;
if ( ! mem )
goto error ;
scratch - > dst = mem ;
2016-10-21 15:19:48 +03:00
}
return 0 ;
2019-03-29 16:09:56 +03:00
error :
crypto_scomp_free_scratches ( ) ;
return - ENOMEM ;
2016-10-21 15:19:48 +03:00
}
2017-07-21 18:42:38 +03:00
static int crypto_scomp_init_tfm ( struct crypto_tfm * tfm )
{
2019-03-29 16:09:56 +03:00
int ret = 0 ;
2017-07-21 18:42:38 +03:00
mutex_lock ( & scomp_lock ) ;
2019-03-29 16:09:56 +03:00
if ( ! scomp_scratch_users + + )
ret = crypto_scomp_alloc_scratches ( ) ;
2017-07-21 18:42:38 +03:00
mutex_unlock ( & scomp_lock ) ;
return ret ;
}
2016-10-21 15:19:48 +03:00
/*
 * Common implementation behind the acomp compress/decompress hooks
 * (dir != 0 -> compress, dir == 0 -> decompress).
 *
 * The scatterlist source is linearized into this CPU's scratch buffer,
 * the synchronous algorithm runs on the linear buffers, and the output is
 * copied back into req->dst.  The per-CPU scratch spinlock is held across
 * the whole operation, so the compression itself runs under the lock.
 *
 * Return: 0 on success, -EINVAL on bad src/dst lengths, -ENOMEM if a
 * destination SGL must be allocated and cannot be, or the algorithm's
 * error code.
 */
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	struct scomp_scratch *scratch;
	int ret;

	/* Source must exist and fit the fixed-size scratch buffer. */
	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	/* A caller-supplied destination must have a non-zero length. */
	if (req->dst && !req->dlen)
		return -EINVAL;

	/* No dst (or oversized dlen): offer the full scratch capacity. */
	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);

	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
					    scratch->dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
					      scratch->dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			/* Allocate a dst SGL sized to the actual output;
			 * GFP_ATOMIC because the spinlock is held. */
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	spin_unlock(&scratch->lock);
	return ret;
}
static int scomp_acomp_compress ( struct acomp_req * req )
{
return scomp_acomp_comp_decomp ( req , 1 ) ;
}
static int scomp_acomp_decompress ( struct acomp_req * req )
{
return scomp_acomp_comp_decomp ( req , 0 ) ;
}
static void crypto_exit_scomp_ops_async ( struct crypto_tfm * tfm )
{
struct crypto_scomp * * ctx = crypto_tfm_ctx ( tfm ) ;
crypto_free_scomp ( * ctx ) ;
2017-07-21 18:42:38 +03:00
mutex_lock ( & scomp_lock ) ;
2019-03-29 16:09:56 +03:00
if ( ! - - scomp_scratch_users )
crypto_scomp_free_scratches ( ) ;
2017-07-21 18:42:38 +03:00
mutex_unlock ( & scomp_lock ) ;
2016-10-21 15:19:48 +03:00
}
/*
 * Set up an acomp tfm whose algorithm is actually a synchronous scomp:
 * create the underlying scomp tfm and wire the acomp callbacks to the
 * scratch-buffer based wrappers above.
 *
 * Return: 0 on success, -EAGAIN if the algorithm's module reference
 * cannot be taken, or the error from crypto_create_tfm().
 */
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		/* Drop the module ref taken above on failure. */
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	/* Request context holds a single pointer to the scomp context. */
	crt->reqsize = sizeof(void *);

	return 0;
}
/*
 * Allocate the per-request scomp context for an acomp request backed by
 * a synchronous algorithm and store it in the request.
 *
 * Note: ownership of @req passes to this function — on context allocation
 * failure the request itself is freed and NULL is returned.
 */
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}
/* Free the per-request scomp context stored in an acomp request, if any. */
void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(*tfm_ctx, ctx);
}
/* crypto_type glue binding scomp algorithms into the crypto core. */
static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};
/*
 * Register a synchronous compression algorithm with the crypto core,
 * stamping it with the scomp type and flags first.
 *
 * Return: 0 on success, or the error from crypto_register_alg().
 */
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);
2019-12-16 02:51:19 +03:00
/* Remove a previously registered synchronous compression algorithm. */
void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
2017-04-21 23:54:29 +03:00
/*
 * Register an array of scomp algorithms.  If any registration fails,
 * every algorithm registered so far is unregistered (in reverse order)
 * and the error is returned.
 */
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		int err = crypto_register_scomp(&algs[i]);

		if (err) {
			while (i-- > 0)
				crypto_unregister_scomp(&algs[i]);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);
/* Unregister an array of scomp algorithms in reverse registration order. */
void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	while (count-- > 0)
		crypto_unregister_scomp(&algs[count]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);
2016-10-21 15:19:48 +03:00
MODULE_LICENSE ( " GPL " ) ;
MODULE_DESCRIPTION ( " Synchronous compression type " ) ;