/* LRW: as defined by Cyril Guyot in
 *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

#define LRW_BUFFER_SIZE 128u

struct priv {
	struct crypto_skcipher *child;
	struct lrw_table_ctx table;
};

struct rctx {
	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];

	be128 t;

	be128 *ext;

	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	unsigned int left;

	struct skcipher_request subreq;
};
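
/*
 * Set bit number @bit of the 128-bit block @b, using the bit numbering
 * expected by the gf128mul *_bbe routines.
 */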
static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}
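
/*
 * Precompute the 64k multiplication table for the tweak key (Key2) and the
 * mulinc[] table used for the incremental tweak update: mulinc[i] is the
 * xor-difference of consecutive tweak values when the increment of the
 * block index flips its lowest i+1 bits, already multiplied by Key2.
 */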
int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
	be128 tmp = { 0 };
	int i;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);

void lrw_free_table(struct lrw_table_ctx *ctx)
{
	if (ctx->table)
		gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);
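
/*
 * The LRW key is the underlying cipher key (Key1) followed by an
 * LRW_BLOCK_SIZE byte tweak key (Key2).  Key1 is passed down to the child
 * cipher; Key2 is used to build the multiplication tables.
 */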
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	return lrw_init_table(&ctx->table, tweak);
}
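
/* 128-bit big-endian increment of the block index I */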
static inline void inc(be128 *iv)
{
	be64_add_cpu(&iv->b, 1);
	if (!iv->b)
		be64_add_cpu(&iv->a, 1);
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
	int x;
	__be32 *p = (__be32 *)block;

	for (p += 3, x = 0; x < 128; p--, x += 32) {
		u32 val = be32_to_cpup(p);

		if (!~val)
			continue;

		return x + ffz(val);
	}

	return x;
}
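
/*
 * Undo the pre-whitening on the chunk just processed by the child cipher:
 * xor each output block with the tweak value saved for it in pre_crypt(),
 * then, if more data remains, advance the destination scatterlist past the
 * bytes already handled.
 */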
static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;

	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}
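
/*
 * Prepare the next chunk for the child cipher: save the tweak T for every
 * block in buf[], xor it into the plaintext (PP <- P xor T), advance T for
 * the following block and point the subrequest at the whitened data.  The
 * source scatterlist is advanced once the chunk has been consumed.
 */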
static int pre_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	be128 *iv;
	bool more;
	int err;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, tfm);

	cryptlen = subreq->cryptlen;
	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, req->iv);

	err = skcipher_walk_virt(&w, subreq, false);
	iv = w.iv;

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			be128_xor(wdst++, &rctx->t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&rctx->t, &rctx->t,
				  &ctx->table.mulinc[get_index128(iv)]);
			inc(iv);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}
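
/*
 * Set up the per-request state: pick a tweak buffer (the fixed rctx->buf or,
 * for larger requests, a kmalloc'd buffer of up to PAGE_SIZE bytes),
 * initialise the subrequest and compute the first tweak T = I * Key2 from
 * the IV.
 */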
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = LRW_BUFFER_SIZE;
	if (req->cryptlen > LRW_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table.table);

	return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	if (rctx->ext)
		kfree(rctx->ext);
}
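
/*
 * Encrypt the request in chunks of at most subreq->cryptlen bytes:
 * pre-whiten, run the child cipher, post-whiten, and repeat until nothing
 * is left or the child cipher went asynchronous.
 */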
static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}
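
/*
 * Completion callback for an asynchronous child cipher: finish the current
 * chunk and keep feeding the remaining data through do_encrypt().
 */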
static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}
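
/*
 * Decryption mirrors do_encrypt()/encrypt_done(), using the child cipher's
 * decrypt path; the whitening steps are identical.
 */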
static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}
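
/*
 * Legacy blkcipher-based helper exported for other LRW implementations
 * (e.g. architecture-specific glue code) that supply their own bulk
 * encryption callback via struct lrw_crypt_req.
 */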
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct lrw_crypt_req *req)
{
	const unsigned int bsize = LRW_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct lrw_table_ctx *ctx = req->table_ctx;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *iv, *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(walk.nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)walk.iv;
	t_buf[0] = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&t_buf[0], ctx->table);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				/* T <- I*Key2, using the optimization
				 * discussed in the specification */
				be128_xor(&t_buf[i], t,
					  &ctx->mulinc[get_index128(iv)]);
				inc(iv);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);
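
/*
 * Instantiate the child skcipher and reserve room for struct rctx in the
 * request context.
 */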
static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	lrw_free_table(&ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}
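
/*
 * Template instantiation: "lrw(cipher)" grabs the named skcipher directly;
 * if that fails with -ENOENT the name is treated as a raw cipher and
 * wrapped as "ecb(cipher)" instead, with the instance name mangled back
 * to "lrw(cipher)".
 */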
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn, ecb_name, 0,
					   crypto_requires_sync(algt->type,
								algt->mask));
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_drop_spawn;

		if (ecb_name[len - 1] != ')')
			goto err_drop_spawn;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	}

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");