// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 *	http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

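/*
 * Per-transform context: 'child' is the 'ecb(..)' skcipher that does the
 * bulk encryption, 'tweak' is the bare block cipher used only to encrypt
 * the IV into the initial tweak.  The per-request context carries the
 * running tweak 't' and the subrequest submitted to the child.
 */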
struct xts_tfm_ctx {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

struct xts_request_ctx {
	le128 t;
	struct scatterlist *tail;
	struct scatterlist sg[2];
	struct skcipher_request subreq;
};

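/*
 * An XTS key is the concatenation of two keys of equal size: the first half
 * (Key1) keys the data cipher, the second half (Key2) keys the tweak cipher.
 * xts_verify_key() rejects keys of odd length and, in FIPS mode, keys whose
 * two halves are identical.
 */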
static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption
 * or decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower
 * than just doing the gf128mul_x_ble() calls again.
 */
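/*
 * The tweak for block j+1 is derived from the tweak for block j by
 * multiplying by x in GF(2^128) (gf128mul_x_ble(), in the little-endian
 * byte order XTS specifies): a one-bit left shift with a conditional XOR
 * of the reduction polynomial x^128 + x^7 + x^2 + x + 1.
 */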
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
			 bool enc)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				if (!enc) {
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);

				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

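/*
 * Thin wrappers around xts_xor_tweak(): the first pass XORs the tweaks into
 * the data before the ECB step, the second pass XORs them again over the
 * child cipher's output.
 */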
static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, false, enc);
}

static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, true, enc);
}

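/*
 * Completion callback for the single-block CTS subrequest: once the child
 * cipher has processed the stolen block asynchronously, apply the final
 * tweak XOR to it in place.
 */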
static void xts_cts_done(void *data, int err)
{
	struct skcipher_request *req = data;
	le128 b;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}

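/*
 * Ciphertext stealing for a request that is not a multiple of the block
 * size: pad the final partial plaintext block with the tail of the last full
 * block's ciphertext, move the stolen ciphertext bytes into the final
 * position, and run the padded block through the child cipher once more
 * under the last tweak.
 */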
static int xts_cts_final(struct skcipher_request *req,
			 int (*crypt)(struct skcipher_request *req))
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	b[1] = b[0];
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
				      req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}

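/*
 * Completion callbacks for the asynchronous case: redo the tweak XOR pass
 * over the output and, if the request length is not block-aligned, chain
 * into the CTS step.  Only CRYPTO_TFM_REQ_MAY_BACKLOG is kept on the
 * subrequest; in particular CRYPTO_TFM_REQ_MAY_SLEEP is dropped, as these
 * callbacks may run in atomic context.
 */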
static void xts_encrypt_done(void *data, int err)
{
	struct skcipher_request *req = data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
		err = xts_xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS || err == -EBUSY)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static void xts_decrypt_done(void *data, int err)
{
	struct skcipher_request *req = data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
		err = xts_xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS || err == -EBUSY)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

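/*
 * Common setup for encryption and decryption: reject requests shorter than
 * one block (XTS requires at least one full block, even with ciphertext
 * stealing), point the subrequest at the child cipher over the block-aligned
 * prefix only, and compute the first tweak T by encrypting the IV with the
 * tweak cipher.
 */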
static int xts_init_crypt(struct skcipher_request *req,
			  crypto_completion_t compl)
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

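/*
 * Encryption is: XOR the tweaks into the plaintext, ECB-encrypt with the
 * child, XOR the tweaks again, then handle a trailing partial block with
 * ciphertext stealing.  The '?:' chain stops at the first step that returns
 * nonzero.
 */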
static int xts_encrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_encrypt_done) ?:
	      xts_xor_tweak_pre(req, true) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      xts_xor_tweak_post(req, true);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_encrypt);
}

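/* Decryption mirrors encryption, with the child cipher run in reverse. */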
static int xts_decrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_decrypt_done) ?:
	      xts_xor_tweak_pre(req, false) ?:
	      crypto_skcipher_decrypt(subreq) ?:
	      xts_xor_tweak_post(req, false);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_decrypt);
}

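/*
 * Instantiate the child 'ecb(..)' skcipher from the spawn and the tweak
 * cipher by name; the request size is set so that the child's request fits
 * at the tail of our per-request context.
 */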
static int xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct xts_request_ctx));

	return 0;
}

static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void xts_free_instance(struct skcipher_instance *inst)
{
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->spawn);
	kfree(inst);
}

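/*
 * Template instantiation: for "xts(aes)" the child skcipher is first grabbed
 * under the name as given ("aes") and, on -ENOENT, again as "ecb(aes)"; the
 * tweak cipher name is later recovered by stripping the "ecb(...)" wrapper
 * from the child's cra_name.  A typical user (hypothetical sketch, not part
 * of this file) would do:
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, 64);
 *
 * where a 64-byte key yields AES-256-XTS (two 256-bit halves).
 */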
static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn,
					   skcipher_crypto_instance(inst),
					   ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		int len;

		len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2)
			goto err_free_inst;

		if (ctx->name[len - 1] != ')')
			goto err_free_inst;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);

	inst->alg.init = xts_init_tfm;
	inst->alg.exit = xts_exit_tfm;

	inst->alg.setkey = xts_setkey;
	inst->alg.encrypt = xts_encrypt;
	inst->alg.decrypt = xts_decrypt;

	inst->free = xts_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		xts_free_instance(inst);
	}
	return err;
}

static struct crypto_template xts_tmpl = {
	.name = "xts",
	.create = xts_create,
	.module = THIS_MODULE,
};

static int __init xts_module_init(void)
{
	return crypto_register_template(&xts_tmpl);
}

static void __exit xts_module_exit(void)
{
	crypto_unregister_template(&xts_tmpl);
}

subsys_initcall(xts_module_init);
module_exit(xts_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_SOFTDEP("pre: ecb");