/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/serpent-sse2.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
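/*
 * Lazy kernel-FPU handling: callers thread a fpu_enabled flag through the
 * walk loop so that kernel_fpu_begin()/kernel_fpu_end() run at most once
 * per request rather than once per processed chunk.  The intended usage
 * pattern, as followed by the mode implementations below, is roughly:
 *
 *	bool fpu_enabled = false;
 *
 *	while ((nbytes = walk.nbytes)) {
 *		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
 *		... process chunk ...
 *	}
 *	serpent_fpu_end(fpu_enabled);
 */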
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	if (fpu_enabled)
		return true;

	/* SSE2 is only used when chunk to be processed is large enough, so
	 * do not enable FPU until it is necessary.
	 */
	if (nbytes < SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS)
		return false;

	kernel_fpu_begin();
	return true;
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
	if (fpu_enabled)
		kernel_fpu_end();
}
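/*
 * ECB has no inter-block chaining, so full SERPENT_PARALLEL_BLOCKS batches
 * can be fed directly to the SSE2 x-way routines; the tail is finished one
 * block at a time with the scalar cipher.
 */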
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     bool enc)
{
	bool fpu_enabled = false;
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);

		/* Process multi-block batch */
		if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
			do {
				if (enc)
					serpent_enc_blk_xway(ctx, wdst, wsrc);
				else
					serpent_dec_blk_xway(ctx, wdst, wsrc);

				wsrc += bsize * SERPENT_PARALLEL_BLOCKS;
				wdst += bsize * SERPENT_PARALLEL_BLOCKS;
				nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
			} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			if (enc)
				__serpent_encrypt(ctx, wdst, wsrc);
			else
				__serpent_decrypt(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);
	return err;
}
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, true);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, false);
}
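/*
 * CBC encryption is inherently serial (each block is XORed with the
 * previous ciphertext block before encryption), so there is no parallel
 * SSE2 path here; only CBC decryption below can batch blocks.
 */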
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		__serpent_encrypt(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);
	/* propagate the last ciphertext block as the new chaining value;
	 * XORing it into the old IV would corrupt the IV for the next chunk
	 */
	*(u128 *)walk->iv = *iv;
	return nbytes;
}
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_encrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
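/*
 * CBC decryption starts at the last block of the chunk and works backwards:
 * each batch is decrypted x-way and then XORed with the preceding
 * ciphertext blocks saved in ivs[], so the blocks can be processed in
 * parallel.  last_iv carries the final ciphertext block out as the next
 * chaining value.
 */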
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
	u128 last_iv;
	int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	/* Process multi-block batch */
	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
		do {
			nbytes -= bsize * (SERPENT_PARALLEL_BLOCKS - 1);
			src -= SERPENT_PARALLEL_BLOCKS - 1;
			dst -= SERPENT_PARALLEL_BLOCKS - 1;

			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
				ivs[i] = src[i];

			serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
				u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		__serpent_decrypt(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		u128_xor(dst, dst, src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
		nbytes = __cbc_decrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);
	return err;
}
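/*
 * CTR mode treats the IV as a 128-bit big-endian counter.  The helpers
 * below convert between the big-endian on-the-wire format and a host-order
 * u128 so the counter can be bumped with plain integer arithmetic.
 */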
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}

static inline void u128_inc(u128 *i)
{
	i->b++;
	if (!i->b)
		i->a++;
}
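/*
 * Handle the final sub-block-sized tail: generate one keystream block and
 * XOR only the remaining bytes into the destination, since CTR effectively
 * turns the block cipher into a stream cipher.
 */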
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[SERPENT_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	__serpent_encrypt(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, SERPENT_BLOCK_SIZE);
}
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	be128 ctrblocks[SERPENT_PARALLEL_BLOCKS];
	int i;

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
		do {
			/* create ctrblks for parallel encrypt */
			for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
				if (dst != src)
					dst[i] = src[i];

				u128_to_be128(&ctrblocks[i], &ctrblk);
				u128_inc(&ctrblk);
			}

			serpent_enc_blk_xway_xor(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += SERPENT_PARALLEL_BLOCKS;
			dst += SERPENT_PARALLEL_BLOCKS;
			nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		u128_to_be128(&ctrblocks[0], &ctrblk);
		u128_inc(&ctrblk);

		__serpent_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		u128_xor(dst, dst, (u128 *)ctrblocks);

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, SERPENT_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes) >= SERPENT_BLOCK_SIZE) {
		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
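/*
 * LRW and XTS reuse the generic lrw_crypt()/xts_crypt() walkers, which call
 * back into encrypt_callback()/decrypt_callback() below with data blocks
 * that already have the tweak applied; crypt_priv carries the cipher
 * context and the lazy FPU state across those callbacks.
 */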
struct crypt_priv {
	struct serpent_ctx *ctx;
	bool fpu_enabled;
};
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
}
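/*
 * An LRW key is the Serpent key followed by one extra cipher block of
 * tweak key material, so setkey below splits the input at
 * keylen - SERPENT_BLOCK_SIZE.
 */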
struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct serpent_ctx serpent_ctx;
};

static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
							SERPENT_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen -
						SERPENT_BLOCK_SIZE);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;
	struct serpent_ctx crypt_ctx;
};

static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}
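/*
 * xts_crypt() derives the tweaks itself: tweak_fn (the scalar Serpent
 * encrypt, adapted via XTS_TWEAK_CAST, which only adjusts the function
 * pointer type) encrypts the IV with tweak_ctx, and the data blocks then go
 * through the same callbacks as LRW.
 */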
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
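/*
 * The first five entries are internal "__driver" blkciphers that do the
 * actual SSE2 work; the last five are the user-visible async algorithms,
 * wrapped through the ablk_helper/cryptd machinery so that requests
 * arriving in a context where the FPU is unusable are deferred to process
 * context.
 */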
static struct crypto_alg serpent_algs[10] = { {
	.cra_name		= "__ecb-serpent-sse2",
	.cra_driver_name	= "__driver-ecb-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[0].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-serpent-sse2",
	.cra_driver_name	= "__driver-cbc-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[1].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-serpent-sse2",
	.cra_driver_name	= "__driver-ctr-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[2].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-serpent-sse2",
	.cra_driver_name	= "__driver-lrw-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[3].cra_list),
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= lrw_serpent_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-serpent-sse2",
	.cra_driver_name	= "__driver-xts-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[4].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= xts_serpent_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(serpent)",
	.cra_driver_name	= "ecb-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[5].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(serpent)",
	.cra_driver_name	= "cbc-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[6].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(serpent)",
	.cra_driver_name	= "ctr-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[7].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	.cra_name		= "lrw(serpent)",
	.cra_driver_name	= "lrw-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[8].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(serpent)",
	.cra_driver_name	= "xts-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[9].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };
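/*
 * Registration is gated on runtime SSE2 detection below; without SSE2 the
 * module refuses to load and other serpent providers (such as the generic
 * C implementation) remain available.
 */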
static int __init serpent_sse2_init(void)
{
	if (!cpu_has_xmm2) {
		printk(KERN_INFO "SSE2 instructions are not detected.\n");
		return -ENODEV;
	}

	return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

static void __exit serpent_sse2_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");