/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"
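/*
 * Block XOR helpers used for CBC chaining; one of these is installed as
 * cit_xor_block according to the algorithm's block size (8 or 16 bytes).
 * Both assume the buffers are at least 32-bit aligned.
 */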
static inline void xor_64(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
        ((u32 *)a)[2] ^= ((u32 *)b)[2];
        ((u32 *)a)[3] ^= ((u32 *)b)[3];
}

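/*
 * Slow path: the current block straddles a scatterlist segment, so it is
 * bounced through an aligned buffer on the stack: copy one block in,
 * process it, copy the result out.
 */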
static unsigned int crypt_slow(const struct cipher_desc *desc,
                               struct scatter_walk *in,
                               struct scatter_walk *out, unsigned int bsize)
{
        unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
        u8 buffer[bsize * 2 + alignmask];
        u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        u8 *dst = src + bsize;
        unsigned int n;

        n = scatterwalk_copychunks(src, in, bsize, 0);
        scatterwalk_advance(in, n);

        desc->prfn(desc, dst, src, bsize);

        n = scatterwalk_copychunks(dst, out, bsize, 1);
        scatterwalk_advance(out, n);

        return bsize;
}

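/*
 * Fast path: the data is contiguous within the currently mapped segments,
 * so it can be processed in place.  If either walk failed the alignment
 * mask, @tmp points at a bounce page and the data is copied through it
 * instead.
 */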
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
                                      struct scatter_walk *in,
                                      struct scatter_walk *out,
                                      unsigned int nbytes, u8 *tmp)
{
        u8 *src, *dst;

        src = in->data;
        dst = scatterwalk_samebuf(in, out) ? src : out->data;

        if (tmp) {
                memcpy(tmp, in->data, nbytes);
                src = tmp;
                dst = tmp;
        }

        nbytes = desc->prfn(desc, dst, src, nbytes);

        if (tmp)
                memcpy(out->data, tmp, nbytes);

        scatterwalk_advance(in, nbytes);
        scatterwalk_advance(out, nbytes);

        return nbytes;
}

/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
                 struct scatterlist *dst,
                 struct scatterlist *src,
                 unsigned int nbytes)
{
        struct scatter_walk walk_in, walk_out;
        struct crypto_tfm *tfm = desc->tfm;
        const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
        unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
        unsigned long buffer = 0;

        if (!nbytes)
                return 0;

        if (nbytes % bsize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return -EINVAL;
        }

        scatterwalk_start(&walk_in, src);
        scatterwalk_start(&walk_out, dst);

        for (;;) {
                unsigned int n = nbytes;
                u8 *tmp = NULL;

                if (!scatterwalk_aligned(&walk_in, alignmask) ||
                    !scatterwalk_aligned(&walk_out, alignmask)) {
                        if (!buffer) {
                                buffer = __get_free_page(GFP_ATOMIC);
                                if (!buffer)
                                        n = 0;
                        }
                        tmp = (u8 *)buffer;
                }

                scatterwalk_map(&walk_in, 0);
                scatterwalk_map(&walk_out, 1);

                n = scatterwalk_clamp(&walk_in, n);
                n = scatterwalk_clamp(&walk_out, n);

                if (likely(n >= bsize))
                        n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
                else
                        n = crypt_slow(desc, &walk_in, &walk_out, bsize);

                nbytes -= n;

                scatterwalk_done(&walk_in, 0, nbytes);
                scatterwalk_done(&walk_out, 1, nbytes);

                if (!nbytes)
                        break;

                crypto_yield(tfm);
        }

        if (buffer)
                free_page(buffer);

        return 0;
}

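/*
 * If the caller-supplied IV does not satisfy the algorithm's alignment
 * mask, copy it into an aligned stack buffer for the duration of the
 * operation and copy the updated value back afterwards.
 */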
static int crypt_iv_unaligned(struct cipher_desc *desc,
                              struct scatterlist *dst,
                              struct scatterlist *src,
                              unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
        u8 *iv = desc->info;

        if (unlikely(((unsigned long)iv & alignmask))) {
                unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
                u8 buffer[ivsize + alignmask];
                u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
                int err;

                desc->info = memcpy(tmp, iv, ivsize);
                err = crypt(desc, dst, src, nbytes);
                memcpy(iv, tmp, ivsize);

                return err;
        }

        return crypt(desc, dst, src, nbytes);
}

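/*
 * Generic CBC encryption fallback, used when the algorithm supplies no
 * optimized cia_encrypt_cbc: XOR each plaintext block into the IV,
 * encrypt the result, and keep the ciphertext as the next chaining value.
 */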
static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
                                        u8 *dst, const u8 *src,
                                        unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
        int bsize = crypto_tfm_alg_blocksize(tfm);
        void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
        u8 *iv = desc->info;
        unsigned int done = 0;

        nbytes -= bsize;

        do {
                xor(iv, src);
                fn(tfm, dst, iv);
                memcpy(iv, dst, bsize);

                src += bsize;
                dst += bsize;
        } while ((done += bsize) <= nbytes);

        return done;
}

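/*
 * Generic CBC decryption fallback: decrypt each block and XOR it with the
 * previous ciphertext block (the IV).  When src == dst the block cipher
 * output goes through an aligned stack buffer first, so the ciphertext is
 * still available as the next chaining value.
 */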
static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
                                        u8 *dst, const u8 *src,
                                        unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
        int bsize = crypto_tfm_alg_blocksize(tfm);
        unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);

        u8 stack[src == dst ? bsize + alignmask : 0];
        u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
        u8 **dst_p = src == dst ? &buf : &dst;

        void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
        u8 *iv = desc->info;
        unsigned int done = 0;

        nbytes -= bsize;

        do {
                u8 *tmp_dst = *dst_p;

                fn(tfm, tmp_dst, src);
                xor(tmp_dst, iv);
                memcpy(iv, src, bsize);
                if (tmp_dst != dst)
                        memcpy(dst, tmp_dst, bsize);

                src += bsize;
                dst += bsize;
        } while ((done += bsize) <= nbytes);

        return done;
}

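/*
 * Generic ECB fallback: apply the block cipher to each block independently.
 */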
static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
                                const u8 *src, unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        int bsize = crypto_tfm_alg_blocksize(tfm);
        void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
        unsigned int done = 0;

        nbytes -= bsize;

        do {
                fn(tfm, dst, src);

                src += bsize;
                dst += bsize;
        } while ((done += bsize) <= nbytes);

        return done;
}

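/*
 * Reject keys outside the algorithm's [cia_min_keysize, cia_max_keysize]
 * range before handing the key to the algorithm's own setkey routine.
 */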
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

        if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        } else
                return cia->cia_setkey(tfm, key, keylen,
                                       &tfm->crt_flags);
}

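/*
 * Mode entry points.  Each one fills in a cipher_desc on the stack: crfn
 * is the algorithm's single-block routine, prfn is its optimized bulk
 * helper if one is provided (cia_encrypt_ecb and friends) or the generic
 * fallback above, and info carries the IV for the CBC variants.
 */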
static int ecb_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_encrypt;
        desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process;

        return crypt(&desc, dst, src, nbytes);
}

static int ecb_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_decrypt;
        desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process;

        return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_encrypt;
        desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
        desc.info = tfm->crt_cipher.cit_iv;

        return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_encrypt;
        desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
        desc.info = iv;

        return crypt_iv_unaligned(&desc, dst, src, nbytes);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_decrypt;
        desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
        desc.info = tfm->crt_cipher.cit_iv;

        return crypt(&desc, dst, src, nbytes);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_decrypt;
        desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
        desc.info = iv;

        return crypt_iv_unaligned(&desc, dst, src, nbytes);
}

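/*
 * Placeholders for modes (CFB, CTR) that this layer does not implement.
 */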
static int nocrypt(struct crypto_tfm *tfm,
                   struct scatterlist *dst,
                   struct scatterlist *src,
                   unsigned int nbytes)
{
        return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int nbytes, u8 *iv)
{
        return -ENOSYS;
}

int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
        u32 mode = flags & CRYPTO_TFM_MODE_MASK;

        tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
        return 0;
}

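/*
 * Wire up the mode-specific entry points for this tfm.  For CBC the block
 * size also selects the XOR helper and the IV size, and the IV itself is
 * placed in an aligned area just past the algorithm context.
 */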
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
        int ret = 0;
        struct cipher_tfm *ops = &tfm->crt_cipher;

        ops->cit_setkey = setkey;

        switch (tfm->crt_cipher.cit_mode) {
        case CRYPTO_TFM_MODE_ECB:
                ops->cit_encrypt = ecb_encrypt;
                ops->cit_decrypt = ecb_decrypt;
                break;

        case CRYPTO_TFM_MODE_CBC:
                ops->cit_encrypt = cbc_encrypt;
                ops->cit_decrypt = cbc_decrypt;
                ops->cit_encrypt_iv = cbc_encrypt_iv;
                ops->cit_decrypt_iv = cbc_decrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CFB:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CTR:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        default:
                BUG();
        }

        if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
                unsigned long align;
                unsigned long addr;

                switch (crypto_tfm_alg_blocksize(tfm)) {
                case 8:
                        ops->cit_xor_block = xor_64;
                        break;

                case 16:
                        ops->cit_xor_block = xor_128;
                        break;

                default:
                        printk(KERN_WARNING "%s: block size %u not supported\n",
                               crypto_tfm_alg_name(tfm),
                               crypto_tfm_alg_blocksize(tfm));
                        ret = -EINVAL;
                        goto out;
                }

                ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
                align = crypto_tfm_alg_alignmask(tfm) + 1;
                addr = (unsigned long)crypto_tfm_ctx(tfm);
                addr = ALIGN(addr, align);
                addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
                ops->cit_iv = (void *)addr;
        }

out:
        return ret;
}

void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
}