/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior (sebastian@breakpoint.cc) SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include "crypt_s390.h"

#define AES_KEYLEN_128		1
#define AES_KEYLEN_192		2
#define AES_KEYLEN_256		4

static u8 *ctrblk;
static char keylen_flag;

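/*
 * Per-tfm context for the AES transforms: the raw key, the CPACF
 * function codes chosen at setkey time for encryption and decryption,
 * and a software fallback tfm for key lengths the hardware cannot
 * handle.
 */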
struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	long enc;
	long dec;
	int key_len;
	union {
		struct crypto_blkcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

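/*
 * Parameter block for the PCC (perform cryptographic computation)
 * instruction as used for XTS: PCC reads the tweak and the tweak key
 * and stores the computed XTS parameter in the xts field. The XTS
 * context keeps the data key in 'key' and the tweak key inside the
 * pcc block.
 */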
struct pcc_param {
	u8 key[32];
	u8 tweak[16];
	u8 block[16];
	u8 bit[16];
	u8 xts[16];
};

struct s390_xts_ctx {
	u8 key[32];
	u8 xts_param[16];
	struct pcc_param pcc;
	long enc;
	long dec;
	int key_len;
	struct crypto_blkcipher *fallback;
};

/*
 * Check if the key_len is supported by the HW.
 * Returns 0 if it is, a positive number if it is not and software fallback is
 * required, or a negative number in case the key size is not valid.
 */
static int need_fallback(unsigned int key_len)
{
	switch (key_len) {
	case 16:
		if (!(keylen_flag & AES_KEYLEN_128))
			return 1;
		break;
	case 24:
		if (!(keylen_flag & AES_KEYLEN_192))
			return 1;
		break;
	case 32:
		if (!(keylen_flag & AES_KEYLEN_256))
			return 1;
		break;
	default:
		return -1;
	}

	return 0;
}

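/*
 * Mirror the request flags into the fallback cipher, set its key and
 * copy any result flags back so callers see the fallback's verdict
 * on the key.
 */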
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int ret;

	ret = need_fallback(key_len);
	if (ret < 0) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	sctx->key_len = key_len;
	if (!ret) {
		memcpy(sctx->key, in_key, key_len);
		return 0;
	}

	return setkey_fallback_cip(tfm, in_key, key_len);
}

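/*
 * Single-block encrypt/decrypt: dispatch to the KM (cipher message)
 * instruction with the function code matching the key length, or to
 * the software fallback when the hardware lacks support for this
 * key size.
 */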
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

2006-01-06 11:19:18 +03:00
static struct crypto_alg aes_alg = {
. cra_name = " aes " ,
2006-08-21 15:18:50 +04:00
. cra_driver_name = " aes-s390 " ,
. cra_priority = CRYPT_S390_PRIORITY ,
2007-05-04 20:47:47 +04:00
. cra_flags = CRYPTO_ALG_TYPE_CIPHER |
CRYPTO_ALG_NEED_FALLBACK ,
2006-01-06 11:19:18 +03:00
. cra_blocksize = AES_BLOCK_SIZE ,
. cra_ctxsize = sizeof ( struct s390_aes_ctx ) ,
. cra_module = THIS_MODULE ,
2007-12-01 04:47:37 +03:00
. cra_init = fallback_init_cip ,
. cra_exit = fallback_exit_cip ,
2006-01-06 11:19:18 +03:00
. cra_u = {
. cipher = {
. cia_min_keysize = AES_MIN_KEY_SIZE ,
. cia_max_keysize = AES_MAX_KEY_SIZE ,
. cia_setkey = aes_set_key ,
. cia_encrypt = aes_encrypt ,
. cia_decrypt = aes_decrypt ,
}
}
} ;
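/*
 * Blkcipher fallback helpers: ECB and CBC share this pattern of
 * temporarily swapping desc->tfm to the software fallback for the
 * duration of a single request.
 */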
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KM_AES_128_ENCRYPT;
		sctx->dec = KM_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KM_AES_192_ENCRYPT;
		sctx->dec = KM_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KM_AES_256_ENCRYPT;
		sctx->dec = KM_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

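/*
 * Walk the scatterlists and pass only whole multiples of
 * AES_BLOCK_SIZE to the KM instruction; any partial tail is returned
 * to blkcipher_walk_done() for the next iteration.
 */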
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
			 struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes;

	while ((nbytes = walk->nbytes)) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_km(func, param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(sctx->fallback.blk);
	sctx->fallback.blk = NULL;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	ecb_aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KMC_AES_128_ENCRYPT;
		sctx->dec = KMC_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KMC_AES_192_ENCRYPT;
		sctx->dec = KMC_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KMC_AES_256_ENCRYPT;
		sctx->dec = KMC_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

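/*
 * KMC (cipher message with chaining) takes a parameter block
 * consisting of the chaining value followed by the key, so IV and
 * key are copied into an on-stack block and the updated IV is copied
 * back to the walk when all blocks have been processed.
 */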
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (!nbytes)
		goto out;

	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	do {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_kmc(func, &param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);

out:
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->enc, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->dec, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	cbc_aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct crypto_blkcipher *tfm;
	unsigned int ret;

	tfm = desc->tfm;
	desc->tfm = xts_ctx->fallback;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct crypto_blkcipher *tfm;
	unsigned int ret;

	tfm = desc->tfm;
	desc->tfm = xts_ctx->fallback;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

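/*
 * An XTS key is the concatenation of a data key and a tweak key of
 * equal size. Both halves are stored at the tail of their 32-byte
 * buffers (offset 16 for AES-128), which lets xts_aes_crypt() address
 * either size with one offset computation. 48-byte keys (AES-192)
 * have no CPACF support and are handled by the fallback.
 */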
static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	switch (key_len) {
	case 32:
		xts_ctx->enc = KM_XTS_128_ENCRYPT;
		xts_ctx->dec = KM_XTS_128_DECRYPT;
		memcpy(xts_ctx->key + 16, in_key, 16);
		memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
		break;
	case 48:
		xts_ctx->enc = 0;
		xts_ctx->dec = 0;
		xts_fallback_setkey(tfm, in_key, key_len);
		break;
	case 64:
		xts_ctx->enc = KM_XTS_256_ENCRYPT;
		xts_ctx->dec = KM_XTS_256_DECRYPT;
		memcpy(xts_ctx->key, in_key, 32);
		memcpy(xts_ctx->pcc.key, in_key + 32, 32);
		break;
	default:
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	xts_ctx->key_len = key_len;
	return 0;
}

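/*
 * Derive the initial XTS parameter from the IV (tweak) and the tweak
 * key via PCC, then run the data blocks through KM with the data key
 * and the freshly computed parameter.
 */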
static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_xts_ctx *xts_ctx,
			 struct blkcipher_walk *walk)
{
	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	unsigned int n;
	u8 *in, *out;
	void *param;

	if (!nbytes)
		goto out;

	memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
	memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
	memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
	memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
	param = xts_ctx->pcc.key + offset;
	ret = crypt_s390_pcc(func, param);
	if (ret < 0)
		return -EIO;

	memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
	param = xts_ctx->key + offset;
	do {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;

		ret = crypt_s390_km(func, param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
out:
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(xts_ctx->key_len == 48))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(xts_ctx->key_len == 48))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(xts_ctx->fallback);
	xts_ctx->fallback = NULL;
}

static struct crypto_alg xts_aes_alg = {
	.cra_name		=	"xts(aes)",
	.cra_driver_name	=	"xts-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_xts_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	xts_fallback_init,
	.cra_exit		=	xts_fallback_exit,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	2 * AES_MIN_KEY_SIZE,
			.max_keysize		=	2 * AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	xts_aes_set_key,
			.encrypt		=	xts_aes_encrypt,
			.decrypt		=	xts_aes_decrypt,
		}
	}
};

static int xts_aes_alg_reg;

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	switch (key_len) {
	case 16:
		sctx->enc = KMCTR_AES_128_ENCRYPT;
		sctx->dec = KMCTR_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KMCTR_AES_192_ENCRYPT;
		sctx->dec = KMCTR_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KMCTR_AES_256_ENCRYPT;
		sctx->dec = KMCTR_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

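/*
 * KMCTR consumes one counter value per block, so the shared ctrblk
 * page is pre-filled with consecutive counter values, allowing up to
 * PAGE_SIZE bytes per instruction. A final partial block is run
 * through a stack buffer so that only nbytes reach the destination.
 */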
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	unsigned int i, n, nbytes;
	u8 buf[AES_BLOCK_SIZE];
	u8 *out, *in;

	if (!walk->nbytes)
		return ret;

	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= AES_BLOCK_SIZE) {
			/* only use complete blocks, max. PAGE_SIZE */
			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
						 nbytes & ~(AES_BLOCK_SIZE - 1);
			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
			}
			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
			if (ret < 0 || ret != n)
				return -EIO;
			if (n > AES_BLOCK_SIZE)
				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(ctrblk, AES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
				       AES_BLOCK_SIZE, ctrblk);
		if (ret < 0 || ret != AES_BLOCK_SIZE)
			return -EIO;
		memcpy(out, buf, nbytes);
		crypto_inc(ctrblk, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name		=	"ctr(aes)",
	.cra_driver_name	=	"ctr-aes-s390",
	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	1,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	ctr_aes_set_key,
			.encrypt		=	ctr_aes_encrypt,
			.decrypt		=	ctr_aes_decrypt,
		}
	}
};

static int ctr_aes_alg_reg;

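/*
 * Probe the CPACF facilities at module load: plain KM queries decide
 * which key lengths get hardware ECB/CBC support, while XTS and CTR
 * are only registered when the corresponding MSA4 functions are
 * available.
 */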
static int __init aes_s390_init(void)
{
	int ret;

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
		return -EOPNOTSUPP;

	/* z9 109 and z9 BC/EC only support 128 bit key length */
	if (keylen_flag == AES_KEYLEN_128)
		pr_info("AES hardware acceleration is only available for"
			" 128-bit keys\n");

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
		ret = crypto_register_alg(&xts_aes_alg);
		if (ret)
			goto xts_aes_err;
		xts_aes_alg_reg = 1;
	}

	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
				CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto ctr_aes_err;
		}
		ret = crypto_register_alg(&ctr_aes_alg);
		if (ret) {
			free_page((unsigned long) ctrblk);
			goto ctr_aes_err;
		}
		ctr_aes_alg_reg = 1;
	}

out:
	return ret;

ctr_aes_err:
	crypto_unregister_alg(&xts_aes_alg);
xts_aes_err:
	crypto_unregister_alg(&cbc_aes_alg);
cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	goto out;
}

static void __exit aes_s390_fini(void)
{
	if (ctr_aes_alg_reg) {
		crypto_unregister_alg(&ctr_aes_alg);
		free_page((unsigned long) ctrblk);
	}
	if (xts_aes_alg_reg)
		crypto_unregister_alg(&xts_aes_alg);
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");