// SPDX-License-Identifier: GPL-2.0-or-later
/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BLOCK_SIZE 16

struct lrw_tfm_ctx {
	struct crypto_skcipher *child;

	/*
	 * Optimizes multiplying a random (non-incrementing, as at the
	 * start of a new sector) value by key2; we could also have
	 * used 4k optimization tables or no optimization at all.  In
	 * the latter case we would have to store key2 here.
	 */
	struct gf128mul_64k *table;

	/*
	 * stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
	 * needed for optimized multiplication of incrementing values
	 * with key2
	 */
	be128 mulinc[128];
};

struct lrw_request_ctx {
	be128 t;
	struct skcipher_request subreq;
};
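
/*
 * Set bit number @bit (0 = least significant) in the 128-bit big-endian
 * value at @b.  The XOR translates the big-endian bit index into
 * __set_bit()'s machine-native numbering: on little-endian machines it
 * byte-swaps the index within the 16-byte value (0x80 - BITS_PER_BYTE
 * == 120 flips the byte number while keeping the bit-within-byte), on
 * big-endian machines it swaps the order of the longs
 * (0x80 - BITS_PER_LONG).
 */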
static inline void lrw_setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}
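
/*
 * The LRW key is the concatenation of the cipher key (key1) and a final
 * 16-byte tweak key (key2): lrw_setkey() hands the leading bytes to the
 * child cipher and uses the trailing block to precompute the key2
 * multiplication tables.
 */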
static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		lrw_setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}

/*
 * Returns the number of trailing '1' bits in the words of the counter, which is
 * represented by 4 32-bit words, arranged from least to most significant.
 * At the same time, increments the counter by one.
 *
 * For example:
 *
 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
 * int i = lrw_next_index(counter);
 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
 */
static int lrw_next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);
		counter[i] = 0;
		res += 32;
	}

	/*
	 * If we get here, then res == 128 and we are incrementing the counter
	 * from all ones to all zeros.  This means we must return index 127,
	 * i.e. the one corresponding to key2*{ 1,...,1 }.
	 */
	return 127;
}
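
/*
 * Why the trailing-ones count picks the right mulinc[] entry: incrementing
 * I flips its trailing run of k ones to zero and sets the next bit, so
 * I_new ^ I_old has exactly its low k+1 bits set.  Multiplication by key2
 * is linear over GF(2), hence
 *
 *	key2 * I_new == (key2 * I_old) ^ mulinc[k]
 *
 * where mulinc[k] == key2 * { 0,...,0,1,...,1 } with its low k+1 bits set,
 * exactly the table built in lrw_setkey().
 */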

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the lrw_next_index() calls again.
 */
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&t, &t,
				  &ctx->mulinc[lrw_next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int lrw_xor_tweak_pre(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, false);
}

static int lrw_xor_tweak_post(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, true);
}
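
/*
 * Completion callback for the asynchronous case.  The callback may run
 * in atomic (e.g. softirq) context, so the second tweak pass done here
 * must not sleep; hence CRYPTO_TFM_REQ_MAY_SLEEP is cleared from the
 * subrequest before lrw_xor_tweak_post() walks it again.
 */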
static void lrw_crypt_done(void *data, int err)
{
	struct skcipher_request *req = data;

	if (!err) {
		struct lrw_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = lrw_xor_tweak_post(req);
	}

	skcipher_request_complete(req, err);
}

static void lrw_init_crypt(struct skcipher_request *req)
{
	const struct lrw_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
				      req);
	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen, req->iv);

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);
}
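
/*
 * LRW computes C = E_key1(P ^ T) ^ T with T = I * key2.  The pre pass
 * XORs the tweak while copying req->src to req->dst, the child 'ecb(..)'
 * subrequest then encrypts (or decrypts) req->dst in place, and the post
 * pass XORs the tweak a second time over the result.
 */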
static int lrw_encrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_encrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_decrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_decrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct lrw_request_ctx));

	return 0;
}

static void lrw_exit_tfm(struct crypto_skcipher *tfm)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void lrw_free_instance(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn,
					   skcipher_crypto_instance(inst),
					   ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
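	/*
	 * e.g. a request for "lrw(aes)" is satisfied through the
	 * "ecb(aes)" spawn, and the "ecb(" prefix and ")" suffix are
	 * stripped below so the instance is named "lrw(aes)" rather
	 * than "lrw(ecb(aes))".
	 */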
	if (!strncmp(cipher_name, "ecb(", 4)) {
		int len;

		len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2)
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(be128) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);

	inst->alg.init = lrw_init_tfm;
	inst->alg.exit = lrw_exit_tfm;

	inst->alg.setkey = lrw_setkey;
	inst->alg.encrypt = lrw_encrypt;
	inst->alg.decrypt = lrw_decrypt;

	inst->free = lrw_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		lrw_free_instance(inst);
	}
	return err;
}

static struct crypto_template lrw_tmpl = {
	.name = "lrw",
	.create = lrw_create,
	.module = THIS_MODULE,
};
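
/*
 * Usage sketch (illustrative only, not part of this file): a crypto API
 * user would instantiate the template by name and supply the
 * concatenated key, e.g.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
 *	// key = 16/24/32 bytes of AES key followed by the 16-byte tweak key
 *	err = crypto_skcipher_setkey(tfm, key, aes_keylen + LRW_BLOCK_SIZE);
 */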

static int __init lrw_module_init(void)
{
	return crypto_register_template(&lrw_tmpl);
}

static void __exit lrw_module_exit(void)
{
	crypto_unregister_template(&lrw_tmpl);
}

subsys_initcall(lrw_module_init);
module_exit(lrw_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
MODULE_SOFTDEP("pre: ecb");