2006-01-06 00:19:18 -08:00
/*
* Cryptographic API .
*
* s390 implementation of the AES Cipher Algorithm .
*
* s390 Version :
2012-07-20 11:15:04 +02:00
* Copyright IBM Corp . 2005 , 2007
2006-01-06 00:19:18 -08:00
* Author ( s ) : Jan Glauber ( jang @ de . ibm . com )
2007-12-01 12:47:37 +11:00
 * Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
2006-01-06 00:19:18 -08:00
*
2007-10-05 16:52:01 +08:00
* Derived from " crypto/aes_generic.c "
2006-01-06 00:19:18 -08:00
*
* This program is free software ; you can redistribute it and / or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation ; either version 2 of the License , or ( at your option )
* any later version .
*
*/
2008-12-25 13:39:37 +01:00
# define KMSG_COMPONENT "aes_s390"
# define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
2007-10-17 23:18:57 +08:00
# include <crypto/aes.h>
2006-08-21 21:39:24 +10:00
# include <crypto/algapi.h>
2007-12-01 12:47:37 +11:00
# include <linux/err.h>
2006-01-06 00:19:18 -08:00
# include <linux/module.h>
2015-02-19 17:34:07 +01:00
# include <linux/cpufeature.h>
2006-01-06 00:19:18 -08:00
# include <linux/init.h>
2014-01-16 16:01:11 +01:00
# include <linux/spinlock.h>
2016-02-17 07:00:01 +01:00
# include <crypto/xts.h>
2016-03-17 15:22:12 +01:00
# include <asm/cpacf.h>
2006-01-06 00:19:18 -08:00
2007-02-05 21:18:14 +01:00
# define AES_KEYLEN_128 1
# define AES_KEYLEN_192 2
# define AES_KEYLEN_256 4
2011-05-04 15:09:44 +10:00
static u8 * ctrblk ;
2014-01-16 16:01:11 +01:00
static DEFINE_SPINLOCK ( ctrblk_lock ) ;
2011-05-04 15:09:44 +10:00
static char keylen_flag ;
2006-01-06 00:19:18 -08:00
struct s390_aes_ctx {
u8 key [ AES_MAX_KEY_SIZE ] ;
2006-08-21 21:39:24 +10:00
long enc ;
long dec ;
2006-01-06 00:19:18 -08:00
int key_len ;
2007-12-01 12:47:37 +11:00
union {
struct crypto_blkcipher * blk ;
struct crypto_cipher * cip ;
} fallback ;
2006-01-06 00:19:18 -08:00
} ;
2011-04-26 16:12:42 +10:00
struct pcc_param {
u8 key [ 32 ] ;
u8 tweak [ 16 ] ;
u8 block [ 16 ] ;
u8 bit [ 16 ] ;
u8 xts [ 16 ] ;
} ;
struct s390_xts_ctx {
u8 key [ 32 ] ;
2013-11-19 17:12:47 +01:00
u8 pcc_key [ 32 ] ;
2011-04-26 16:12:42 +10:00
long enc ;
long dec ;
int key_len ;
struct crypto_blkcipher * fallback ;
} ;
2007-12-01 12:47:37 +11:00
/*
* Check if the key_len is supported by the HW .
* Returns 0 if it is , a positive number if it is not and software fallback is
* required or a negative number in case the key size is not valid
*/
static int need_fallback ( unsigned int key_len )
2006-01-06 00:19:18 -08:00
{
switch ( key_len ) {
case 16 :
2007-02-05 21:18:14 +01:00
if ( ! ( keylen_flag & AES_KEYLEN_128 ) )
2007-12-01 12:47:37 +11:00
return 1 ;
2006-01-06 00:19:18 -08:00
break ;
case 24 :
2007-02-05 21:18:14 +01:00
if ( ! ( keylen_flag & AES_KEYLEN_192 ) )
2007-12-01 12:47:37 +11:00
return 1 ;
2006-01-06 00:19:18 -08:00
break ;
case 32 :
2007-02-05 21:18:14 +01:00
if ( ! ( keylen_flag & AES_KEYLEN_256 ) )
2007-12-01 12:47:37 +11:00
return 1 ;
2006-01-06 00:19:18 -08:00
break ;
default :
2007-12-01 12:47:37 +11:00
return - 1 ;
2006-01-06 00:19:18 -08:00
break ;
}
2007-12-01 12:47:37 +11:00
return 0 ;
}
static int setkey_fallback_cip ( struct crypto_tfm * tfm , const u8 * in_key ,
unsigned int key_len )
{
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
int ret ;
2010-01-08 14:18:34 +11:00
sctx - > fallback . cip - > base . crt_flags & = ~ CRYPTO_TFM_REQ_MASK ;
sctx - > fallback . cip - > base . crt_flags | = ( tfm - > crt_flags &
2007-12-01 12:47:37 +11:00
CRYPTO_TFM_REQ_MASK ) ;
ret = crypto_cipher_setkey ( sctx - > fallback . cip , in_key , key_len ) ;
if ( ret ) {
tfm - > crt_flags & = ~ CRYPTO_TFM_RES_MASK ;
2010-01-08 14:18:34 +11:00
tfm - > crt_flags | = ( sctx - > fallback . cip - > base . crt_flags &
2007-12-01 12:47:37 +11:00
CRYPTO_TFM_RES_MASK ) ;
}
return ret ;
}
static int aes_set_key ( struct crypto_tfm * tfm , const u8 * in_key ,
unsigned int key_len )
{
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
u32 * flags = & tfm - > crt_flags ;
int ret ;
ret = need_fallback ( key_len ) ;
if ( ret < 0 ) {
* flags | = CRYPTO_TFM_RES_BAD_KEY_LEN ;
return - EINVAL ;
}
2006-01-06 00:19:18 -08:00
sctx - > key_len = key_len ;
2007-12-01 12:47:37 +11:00
if ( ! ret ) {
memcpy ( sctx - > key , in_key , key_len ) ;
return 0 ;
}
return setkey_fallback_cip ( tfm , in_key , key_len ) ;
2006-01-06 00:19:18 -08:00
}
2006-05-16 22:09:29 +10:00
static void aes_encrypt ( struct crypto_tfm * tfm , u8 * out , const u8 * in )
2006-01-06 00:19:18 -08:00
{
2015-01-01 22:56:02 +08:00
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
2006-01-06 00:19:18 -08:00
2007-12-01 12:47:37 +11:00
if ( unlikely ( need_fallback ( sctx - > key_len ) ) ) {
crypto_cipher_encrypt_one ( sctx - > fallback . cip , out , in ) ;
return ;
}
2006-01-06 00:19:18 -08:00
switch ( sctx - > key_len ) {
case 16 :
2016-03-17 15:22:12 +01:00
cpacf_km ( CPACF_KM_AES_128_ENC , & sctx - > key , out , in ,
AES_BLOCK_SIZE ) ;
2006-01-06 00:19:18 -08:00
break ;
case 24 :
2016-03-17 15:22:12 +01:00
cpacf_km ( CPACF_KM_AES_192_ENC , & sctx - > key , out , in ,
AES_BLOCK_SIZE ) ;
2006-01-06 00:19:18 -08:00
break ;
case 32 :
2016-03-17 15:22:12 +01:00
cpacf_km ( CPACF_KM_AES_256_ENC , & sctx - > key , out , in ,
AES_BLOCK_SIZE ) ;
2006-01-06 00:19:18 -08:00
break ;
}
}
2006-05-16 22:09:29 +10:00
static void aes_decrypt ( struct crypto_tfm * tfm , u8 * out , const u8 * in )
2006-01-06 00:19:18 -08:00
{
2015-01-01 22:56:02 +08:00
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
2006-01-06 00:19:18 -08:00
2007-12-01 12:47:37 +11:00
if ( unlikely ( need_fallback ( sctx - > key_len ) ) ) {
crypto_cipher_decrypt_one ( sctx - > fallback . cip , out , in ) ;
return ;
}
2006-01-06 00:19:18 -08:00
switch ( sctx - > key_len ) {
case 16 :
2016-03-17 15:22:12 +01:00
cpacf_km ( CPACF_KM_AES_128_DEC , & sctx - > key , out , in ,
AES_BLOCK_SIZE ) ;
2006-01-06 00:19:18 -08:00
break ;
case 24 :
2016-03-17 15:22:12 +01:00
cpacf_km ( CPACF_KM_AES_192_DEC , & sctx - > key , out , in ,
AES_BLOCK_SIZE ) ;
2006-01-06 00:19:18 -08:00
break ;
case 32 :
2016-03-17 15:22:12 +01:00
cpacf_km ( CPACF_KM_AES_256_DEC , & sctx - > key , out , in ,
AES_BLOCK_SIZE ) ;
2006-01-06 00:19:18 -08:00
break ;
}
}
2007-12-01 12:47:37 +11:00
static int fallback_init_cip ( struct crypto_tfm * tfm )
{
const char * name = tfm - > __crt_alg - > cra_name ;
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
sctx - > fallback . cip = crypto_alloc_cipher ( name , 0 ,
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK ) ;
if ( IS_ERR ( sctx - > fallback . cip ) ) {
2008-12-25 13:39:37 +01:00
pr_err ( " Allocating AES fallback algorithm %s failed \n " ,
name ) ;
2009-12-18 17:43:18 +01:00
return PTR_ERR ( sctx - > fallback . cip ) ;
2007-12-01 12:47:37 +11:00
}
return 0 ;
}
static void fallback_exit_cip ( struct crypto_tfm * tfm )
{
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
crypto_free_cipher ( sctx - > fallback . cip ) ;
sctx - > fallback . cip = NULL ;
}
2006-01-06 00:19:18 -08:00
static struct crypto_alg aes_alg = {
. cra_name = " aes " ,
2006-08-21 21:18:50 +10:00
. cra_driver_name = " aes-s390 " ,
2016-03-17 15:22:12 +01:00
. cra_priority = 300 ,
2007-05-04 18:47:47 +02:00
. cra_flags = CRYPTO_ALG_TYPE_CIPHER |
CRYPTO_ALG_NEED_FALLBACK ,
2006-01-06 00:19:18 -08:00
. cra_blocksize = AES_BLOCK_SIZE ,
. cra_ctxsize = sizeof ( struct s390_aes_ctx ) ,
. cra_module = THIS_MODULE ,
2007-12-01 12:47:37 +11:00
. cra_init = fallback_init_cip ,
. cra_exit = fallback_exit_cip ,
2006-01-06 00:19:18 -08:00
. cra_u = {
. cipher = {
. cia_min_keysize = AES_MIN_KEY_SIZE ,
. cia_max_keysize = AES_MAX_KEY_SIZE ,
. cia_setkey = aes_set_key ,
. cia_encrypt = aes_encrypt ,
. cia_decrypt = aes_decrypt ,
}
}
} ;
2007-12-01 12:47:37 +11:00
static int setkey_fallback_blk ( struct crypto_tfm * tfm , const u8 * key ,
unsigned int len )
{
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
unsigned int ret ;
sctx - > fallback . blk - > base . crt_flags & = ~ CRYPTO_TFM_REQ_MASK ;
sctx - > fallback . blk - > base . crt_flags | = ( tfm - > crt_flags &
CRYPTO_TFM_REQ_MASK ) ;
ret = crypto_blkcipher_setkey ( sctx - > fallback . blk , key , len ) ;
if ( ret ) {
tfm - > crt_flags & = ~ CRYPTO_TFM_RES_MASK ;
tfm - > crt_flags | = ( sctx - > fallback . blk - > base . crt_flags &
CRYPTO_TFM_RES_MASK ) ;
}
return ret ;
}
static int fallback_blk_dec ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
unsigned int ret ;
struct crypto_blkcipher * tfm ;
struct s390_aes_ctx * sctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
tfm = desc - > tfm ;
desc - > tfm = sctx - > fallback . blk ;
2007-12-10 15:49:41 +08:00
ret = crypto_blkcipher_decrypt_iv ( desc , dst , src , nbytes ) ;
2007-12-01 12:47:37 +11:00
desc - > tfm = tfm ;
return ret ;
}
static int fallback_blk_enc ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
unsigned int ret ;
struct crypto_blkcipher * tfm ;
struct s390_aes_ctx * sctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
tfm = desc - > tfm ;
desc - > tfm = sctx - > fallback . blk ;
2007-12-10 15:49:41 +08:00
ret = crypto_blkcipher_encrypt_iv ( desc , dst , src , nbytes ) ;
2007-12-01 12:47:37 +11:00
desc - > tfm = tfm ;
return ret ;
}
2006-08-21 21:39:24 +10:00
static int ecb_aes_set_key ( struct crypto_tfm * tfm , const u8 * in_key ,
unsigned int key_len )
{
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
2007-12-01 12:47:37 +11:00
int ret ;
ret = need_fallback ( key_len ) ;
if ( ret > 0 ) {
sctx - > key_len = key_len ;
return setkey_fallback_blk ( tfm , in_key , key_len ) ;
}
2006-08-21 21:39:24 +10:00
switch ( key_len ) {
case 16 :
2016-03-17 15:22:12 +01:00
sctx - > enc = CPACF_KM_AES_128_ENC ;
sctx - > dec = CPACF_KM_AES_128_DEC ;
2006-08-21 21:39:24 +10:00
break ;
case 24 :
2016-03-17 15:22:12 +01:00
sctx - > enc = CPACF_KM_AES_192_ENC ;
sctx - > dec = CPACF_KM_AES_192_DEC ;
2006-08-21 21:39:24 +10:00
break ;
case 32 :
2016-03-17 15:22:12 +01:00
sctx - > enc = CPACF_KM_AES_256_ENC ;
sctx - > dec = CPACF_KM_AES_256_DEC ;
2006-08-21 21:39:24 +10:00
break ;
}
return aes_set_key ( tfm , in_key , key_len ) ;
}
static int ecb_aes_crypt ( struct blkcipher_desc * desc , long func , void * param ,
struct blkcipher_walk * walk )
{
int ret = blkcipher_walk_virt ( desc , walk ) ;
unsigned int nbytes ;
while ( ( nbytes = walk - > nbytes ) ) {
/* only use complete blocks */
unsigned int n = nbytes & ~ ( AES_BLOCK_SIZE - 1 ) ;
u8 * out = walk - > dst . virt . addr ;
u8 * in = walk - > src . virt . addr ;
2016-03-17 15:22:12 +01:00
ret = cpacf_km ( func , param , out , in , n ) ;
2012-10-26 15:06:12 +02:00
if ( ret < 0 | | ret ! = n )
return - EIO ;
2006-08-21 21:39:24 +10:00
nbytes & = AES_BLOCK_SIZE - 1 ;
ret = blkcipher_walk_done ( desc , walk , nbytes ) ;
}
return ret ;
}
static int ecb_aes_encrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_aes_ctx * sctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct blkcipher_walk walk ;
2007-12-01 12:47:37 +11:00
if ( unlikely ( need_fallback ( sctx - > key_len ) ) )
return fallback_blk_enc ( desc , dst , src , nbytes ) ;
2006-08-21 21:39:24 +10:00
blkcipher_walk_init ( & walk , dst , src , nbytes ) ;
return ecb_aes_crypt ( desc , sctx - > enc , sctx - > key , & walk ) ;
}
static int ecb_aes_decrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_aes_ctx * sctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct blkcipher_walk walk ;
2007-12-01 12:47:37 +11:00
if ( unlikely ( need_fallback ( sctx - > key_len ) ) )
return fallback_blk_dec ( desc , dst , src , nbytes ) ;
2006-08-21 21:39:24 +10:00
blkcipher_walk_init ( & walk , dst , src , nbytes ) ;
return ecb_aes_crypt ( desc , sctx - > dec , sctx - > key , & walk ) ;
}
2007-12-01 12:47:37 +11:00
static int fallback_init_blk ( struct crypto_tfm * tfm )
{
const char * name = tfm - > __crt_alg - > cra_name ;
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
sctx - > fallback . blk = crypto_alloc_blkcipher ( name , 0 ,
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK ) ;
if ( IS_ERR ( sctx - > fallback . blk ) ) {
2008-12-25 13:39:37 +01:00
pr_err ( " Allocating AES fallback algorithm %s failed \n " ,
name ) ;
2007-12-01 12:47:37 +11:00
return PTR_ERR ( sctx - > fallback . blk ) ;
}
return 0 ;
}
static void fallback_exit_blk ( struct crypto_tfm * tfm )
{
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
crypto_free_blkcipher ( sctx - > fallback . blk ) ;
sctx - > fallback . blk = NULL ;
}
2006-08-21 21:39:24 +10:00
static struct crypto_alg ecb_aes_alg = {
. cra_name = " ecb(aes) " ,
. cra_driver_name = " ecb-aes-s390 " ,
2016-03-17 15:22:12 +01:00
. cra_priority = 400 , /* combo: aes + ecb */
2007-05-04 18:47:47 +02:00
. cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK ,
2006-08-21 21:39:24 +10:00
. cra_blocksize = AES_BLOCK_SIZE ,
. cra_ctxsize = sizeof ( struct s390_aes_ctx ) ,
. cra_type = & crypto_blkcipher_type ,
. cra_module = THIS_MODULE ,
2007-12-01 12:47:37 +11:00
. cra_init = fallback_init_blk ,
. cra_exit = fallback_exit_blk ,
2006-08-21 21:39:24 +10:00
. cra_u = {
. blkcipher = {
. min_keysize = AES_MIN_KEY_SIZE ,
. max_keysize = AES_MAX_KEY_SIZE ,
. setkey = ecb_aes_set_key ,
. encrypt = ecb_aes_encrypt ,
. decrypt = ecb_aes_decrypt ,
}
}
} ;
static int cbc_aes_set_key ( struct crypto_tfm * tfm , const u8 * in_key ,
unsigned int key_len )
{
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
2007-12-01 12:47:37 +11:00
int ret ;
ret = need_fallback ( key_len ) ;
if ( ret > 0 ) {
sctx - > key_len = key_len ;
return setkey_fallback_blk ( tfm , in_key , key_len ) ;
}
2006-08-21 21:39:24 +10:00
switch ( key_len ) {
case 16 :
2016-03-17 15:22:12 +01:00
sctx - > enc = CPACF_KMC_AES_128_ENC ;
sctx - > dec = CPACF_KMC_AES_128_DEC ;
2006-08-21 21:39:24 +10:00
break ;
case 24 :
2016-03-17 15:22:12 +01:00
sctx - > enc = CPACF_KMC_AES_192_ENC ;
sctx - > dec = CPACF_KMC_AES_192_DEC ;
2006-08-21 21:39:24 +10:00
break ;
case 32 :
2016-03-17 15:22:12 +01:00
sctx - > enc = CPACF_KMC_AES_256_ENC ;
sctx - > dec = CPACF_KMC_AES_256_DEC ;
2006-08-21 21:39:24 +10:00
break ;
}
return aes_set_key ( tfm , in_key , key_len ) ;
}
2013-11-05 19:36:27 +08:00
static int cbc_aes_crypt ( struct blkcipher_desc * desc , long func ,
2006-08-21 21:39:24 +10:00
struct blkcipher_walk * walk )
{
2013-11-05 19:36:27 +08:00
struct s390_aes_ctx * sctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
2006-08-21 21:39:24 +10:00
int ret = blkcipher_walk_virt ( desc , walk ) ;
unsigned int nbytes = walk - > nbytes ;
2013-11-05 19:36:27 +08:00
struct {
u8 iv [ AES_BLOCK_SIZE ] ;
u8 key [ AES_MAX_KEY_SIZE ] ;
} param ;
2006-08-21 21:39:24 +10:00
if ( ! nbytes )
goto out ;
2013-11-05 19:36:27 +08:00
memcpy ( param . iv , walk - > iv , AES_BLOCK_SIZE ) ;
memcpy ( param . key , sctx - > key , sctx - > key_len ) ;
2006-08-21 21:39:24 +10:00
do {
/* only use complete blocks */
unsigned int n = nbytes & ~ ( AES_BLOCK_SIZE - 1 ) ;
u8 * out = walk - > dst . virt . addr ;
u8 * in = walk - > src . virt . addr ;
2016-03-17 15:22:12 +01:00
ret = cpacf_kmc ( func , & param , out , in , n ) ;
2012-10-26 15:06:12 +02:00
if ( ret < 0 | | ret ! = n )
return - EIO ;
2006-08-21 21:39:24 +10:00
nbytes & = AES_BLOCK_SIZE - 1 ;
ret = blkcipher_walk_done ( desc , walk , nbytes ) ;
} while ( ( nbytes = walk - > nbytes ) ) ;
2013-11-05 19:36:27 +08:00
memcpy ( walk - > iv , param . iv , AES_BLOCK_SIZE ) ;
2006-08-21 21:39:24 +10:00
out :
return ret ;
}
static int cbc_aes_encrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_aes_ctx * sctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct blkcipher_walk walk ;
2007-12-01 12:47:37 +11:00
if ( unlikely ( need_fallback ( sctx - > key_len ) ) )
return fallback_blk_enc ( desc , dst , src , nbytes ) ;
2006-08-21 21:39:24 +10:00
blkcipher_walk_init ( & walk , dst , src , nbytes ) ;
2013-11-05 19:36:27 +08:00
return cbc_aes_crypt ( desc , sctx - > enc , & walk ) ;
2006-08-21 21:39:24 +10:00
}
static int cbc_aes_decrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_aes_ctx * sctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct blkcipher_walk walk ;
2007-12-01 12:47:37 +11:00
if ( unlikely ( need_fallback ( sctx - > key_len ) ) )
return fallback_blk_dec ( desc , dst , src , nbytes ) ;
2006-08-21 21:39:24 +10:00
blkcipher_walk_init ( & walk , dst , src , nbytes ) ;
2013-11-05 19:36:27 +08:00
return cbc_aes_crypt ( desc , sctx - > dec , & walk ) ;
2006-08-21 21:39:24 +10:00
}
static struct crypto_alg cbc_aes_alg = {
. cra_name = " cbc(aes) " ,
. cra_driver_name = " cbc-aes-s390 " ,
2016-03-17 15:22:12 +01:00
. cra_priority = 400 , /* combo: aes + cbc */
2007-05-04 18:47:47 +02:00
. cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK ,
2006-08-21 21:39:24 +10:00
. cra_blocksize = AES_BLOCK_SIZE ,
. cra_ctxsize = sizeof ( struct s390_aes_ctx ) ,
. cra_type = & crypto_blkcipher_type ,
. cra_module = THIS_MODULE ,
2007-12-01 12:47:37 +11:00
. cra_init = fallback_init_blk ,
. cra_exit = fallback_exit_blk ,
2006-08-21 21:39:24 +10:00
. cra_u = {
. blkcipher = {
. min_keysize = AES_MIN_KEY_SIZE ,
. max_keysize = AES_MAX_KEY_SIZE ,
. ivsize = AES_BLOCK_SIZE ,
. setkey = cbc_aes_set_key ,
. encrypt = cbc_aes_encrypt ,
. decrypt = cbc_aes_decrypt ,
}
}
} ;
2011-04-26 16:12:42 +10:00
static int xts_fallback_setkey ( struct crypto_tfm * tfm , const u8 * key ,
unsigned int len )
{
struct s390_xts_ctx * xts_ctx = crypto_tfm_ctx ( tfm ) ;
unsigned int ret ;
xts_ctx - > fallback - > base . crt_flags & = ~ CRYPTO_TFM_REQ_MASK ;
xts_ctx - > fallback - > base . crt_flags | = ( tfm - > crt_flags &
CRYPTO_TFM_REQ_MASK ) ;
ret = crypto_blkcipher_setkey ( xts_ctx - > fallback , key , len ) ;
if ( ret ) {
tfm - > crt_flags & = ~ CRYPTO_TFM_RES_MASK ;
tfm - > crt_flags | = ( xts_ctx - > fallback - > base . crt_flags &
CRYPTO_TFM_RES_MASK ) ;
}
return ret ;
}
static int xts_fallback_decrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_xts_ctx * xts_ctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct crypto_blkcipher * tfm ;
unsigned int ret ;
tfm = desc - > tfm ;
desc - > tfm = xts_ctx - > fallback ;
ret = crypto_blkcipher_decrypt_iv ( desc , dst , src , nbytes ) ;
desc - > tfm = tfm ;
return ret ;
}
static int xts_fallback_encrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_xts_ctx * xts_ctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct crypto_blkcipher * tfm ;
unsigned int ret ;
tfm = desc - > tfm ;
desc - > tfm = xts_ctx - > fallback ;
ret = crypto_blkcipher_encrypt_iv ( desc , dst , src , nbytes ) ;
desc - > tfm = tfm ;
return ret ;
}
static int xts_aes_set_key ( struct crypto_tfm * tfm , const u8 * in_key ,
unsigned int key_len )
{
struct s390_xts_ctx * xts_ctx = crypto_tfm_ctx ( tfm ) ;
u32 * flags = & tfm - > crt_flags ;
2016-02-09 15:37:47 +01:00
int err ;
err = xts_check_key ( tfm , in_key , key_len ) ;
if ( err )
return err ;
2011-04-26 16:12:42 +10:00
switch ( key_len ) {
case 32 :
2016-03-17 15:22:12 +01:00
xts_ctx - > enc = CPACF_KM_XTS_128_ENC ;
xts_ctx - > dec = CPACF_KM_XTS_128_DEC ;
2011-04-26 16:12:42 +10:00
memcpy ( xts_ctx - > key + 16 , in_key , 16 ) ;
2013-11-19 17:12:47 +01:00
memcpy ( xts_ctx - > pcc_key + 16 , in_key + 16 , 16 ) ;
2011-04-26 16:12:42 +10:00
break ;
case 48 :
xts_ctx - > enc = 0 ;
xts_ctx - > dec = 0 ;
xts_fallback_setkey ( tfm , in_key , key_len ) ;
break ;
case 64 :
2016-03-17 15:22:12 +01:00
xts_ctx - > enc = CPACF_KM_XTS_256_ENC ;
xts_ctx - > dec = CPACF_KM_XTS_256_DEC ;
2011-04-26 16:12:42 +10:00
memcpy ( xts_ctx - > key , in_key , 32 ) ;
2013-11-19 17:12:47 +01:00
memcpy ( xts_ctx - > pcc_key , in_key + 32 , 32 ) ;
2011-04-26 16:12:42 +10:00
break ;
default :
* flags | = CRYPTO_TFM_RES_BAD_KEY_LEN ;
return - EINVAL ;
}
xts_ctx - > key_len = key_len ;
return 0 ;
}
static int xts_aes_crypt ( struct blkcipher_desc * desc , long func ,
struct s390_xts_ctx * xts_ctx ,
struct blkcipher_walk * walk )
{
unsigned int offset = ( xts_ctx - > key_len > > 1 ) & 0x10 ;
int ret = blkcipher_walk_virt ( desc , walk ) ;
unsigned int nbytes = walk - > nbytes ;
unsigned int n ;
u8 * in , * out ;
2013-11-19 17:12:47 +01:00
struct pcc_param pcc_param ;
struct {
u8 key [ 32 ] ;
u8 init [ 16 ] ;
} xts_param ;
2011-04-26 16:12:42 +10:00
if ( ! nbytes )
goto out ;
2013-11-19 17:12:47 +01:00
memset ( pcc_param . block , 0 , sizeof ( pcc_param . block ) ) ;
memset ( pcc_param . bit , 0 , sizeof ( pcc_param . bit ) ) ;
memset ( pcc_param . xts , 0 , sizeof ( pcc_param . xts ) ) ;
memcpy ( pcc_param . tweak , walk - > iv , sizeof ( pcc_param . tweak ) ) ;
memcpy ( pcc_param . key , xts_ctx - > pcc_key , 32 ) ;
2016-03-17 15:22:12 +01:00
/* remove decipher modifier bit from 'func' and call PCC */
ret = cpacf_pcc ( func & 0x7f , & pcc_param . key [ offset ] ) ;
2012-10-26 15:06:12 +02:00
if ( ret < 0 )
return - EIO ;
2011-04-26 16:12:42 +10:00
2013-11-19 17:12:47 +01:00
memcpy ( xts_param . key , xts_ctx - > key , 32 ) ;
memcpy ( xts_param . init , pcc_param . xts , 16 ) ;
2011-04-26 16:12:42 +10:00
do {
/* only use complete blocks */
n = nbytes & ~ ( AES_BLOCK_SIZE - 1 ) ;
out = walk - > dst . virt . addr ;
in = walk - > src . virt . addr ;
2016-03-17 15:22:12 +01:00
ret = cpacf_km ( func , & xts_param . key [ offset ] , out , in , n ) ;
2012-10-26 15:06:12 +02:00
if ( ret < 0 | | ret ! = n )
return - EIO ;
2011-04-26 16:12:42 +10:00
nbytes & = AES_BLOCK_SIZE - 1 ;
ret = blkcipher_walk_done ( desc , walk , nbytes ) ;
} while ( ( nbytes = walk - > nbytes ) ) ;
out :
return ret ;
}
static int xts_aes_encrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_xts_ctx * xts_ctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct blkcipher_walk walk ;
if ( unlikely ( xts_ctx - > key_len = = 48 ) )
return xts_fallback_encrypt ( desc , dst , src , nbytes ) ;
blkcipher_walk_init ( & walk , dst , src , nbytes ) ;
return xts_aes_crypt ( desc , xts_ctx - > enc , xts_ctx , & walk ) ;
}
static int xts_aes_decrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_xts_ctx * xts_ctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct blkcipher_walk walk ;
if ( unlikely ( xts_ctx - > key_len = = 48 ) )
return xts_fallback_decrypt ( desc , dst , src , nbytes ) ;
blkcipher_walk_init ( & walk , dst , src , nbytes ) ;
return xts_aes_crypt ( desc , xts_ctx - > dec , xts_ctx , & walk ) ;
}
static int xts_fallback_init ( struct crypto_tfm * tfm )
{
const char * name = tfm - > __crt_alg - > cra_name ;
struct s390_xts_ctx * xts_ctx = crypto_tfm_ctx ( tfm ) ;
xts_ctx - > fallback = crypto_alloc_blkcipher ( name , 0 ,
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK ) ;
if ( IS_ERR ( xts_ctx - > fallback ) ) {
pr_err ( " Allocating XTS fallback algorithm %s failed \n " ,
name ) ;
return PTR_ERR ( xts_ctx - > fallback ) ;
}
return 0 ;
}
static void xts_fallback_exit ( struct crypto_tfm * tfm )
{
struct s390_xts_ctx * xts_ctx = crypto_tfm_ctx ( tfm ) ;
crypto_free_blkcipher ( xts_ctx - > fallback ) ;
xts_ctx - > fallback = NULL ;
}
static struct crypto_alg xts_aes_alg = {
. cra_name = " xts(aes) " ,
. cra_driver_name = " xts-aes-s390 " ,
2016-03-17 15:22:12 +01:00
. cra_priority = 400 , /* combo: aes + xts */
2011-04-26 16:12:42 +10:00
. cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK ,
. cra_blocksize = AES_BLOCK_SIZE ,
. cra_ctxsize = sizeof ( struct s390_xts_ctx ) ,
. cra_type = & crypto_blkcipher_type ,
. cra_module = THIS_MODULE ,
. cra_init = xts_fallback_init ,
. cra_exit = xts_fallback_exit ,
. cra_u = {
. blkcipher = {
. min_keysize = 2 * AES_MIN_KEY_SIZE ,
. max_keysize = 2 * AES_MAX_KEY_SIZE ,
. ivsize = AES_BLOCK_SIZE ,
. setkey = xts_aes_set_key ,
. encrypt = xts_aes_encrypt ,
. decrypt = xts_aes_decrypt ,
}
}
} ;
2013-10-15 11:24:07 +02:00
static int xts_aes_alg_reg ;
2011-05-04 15:09:44 +10:00
static int ctr_aes_set_key ( struct crypto_tfm * tfm , const u8 * in_key ,
unsigned int key_len )
{
struct s390_aes_ctx * sctx = crypto_tfm_ctx ( tfm ) ;
switch ( key_len ) {
case 16 :
2016-03-17 15:22:12 +01:00
sctx - > enc = CPACF_KMCTR_AES_128_ENC ;
sctx - > dec = CPACF_KMCTR_AES_128_DEC ;
2011-05-04 15:09:44 +10:00
break ;
case 24 :
2016-03-17 15:22:12 +01:00
sctx - > enc = CPACF_KMCTR_AES_192_ENC ;
sctx - > dec = CPACF_KMCTR_AES_192_DEC ;
2011-05-04 15:09:44 +10:00
break ;
case 32 :
2016-03-17 15:22:12 +01:00
sctx - > enc = CPACF_KMCTR_AES_256_ENC ;
sctx - > dec = CPACF_KMCTR_AES_256_DEC ;
2011-05-04 15:09:44 +10:00
break ;
}
return aes_set_key ( tfm , in_key , key_len ) ;
}
2014-01-16 16:01:11 +01:00
static unsigned int __ctrblk_init ( u8 * ctrptr , unsigned int nbytes )
{
unsigned int i , n ;
/* only use complete blocks, max. PAGE_SIZE */
n = ( nbytes > PAGE_SIZE ) ? PAGE_SIZE : nbytes & ~ ( AES_BLOCK_SIZE - 1 ) ;
for ( i = AES_BLOCK_SIZE ; i < n ; i + = AES_BLOCK_SIZE ) {
memcpy ( ctrptr + i , ctrptr + i - AES_BLOCK_SIZE ,
AES_BLOCK_SIZE ) ;
crypto_inc ( ctrptr + i , AES_BLOCK_SIZE ) ;
}
return n ;
}
2011-05-04 15:09:44 +10:00
static int ctr_aes_crypt ( struct blkcipher_desc * desc , long func ,
struct s390_aes_ctx * sctx , struct blkcipher_walk * walk )
{
int ret = blkcipher_walk_virt_block ( desc , walk , AES_BLOCK_SIZE ) ;
2014-01-16 16:01:11 +01:00
unsigned int n , nbytes ;
u8 buf [ AES_BLOCK_SIZE ] , ctrbuf [ AES_BLOCK_SIZE ] ;
u8 * out , * in , * ctrptr = ctrbuf ;
2011-05-04 15:09:44 +10:00
if ( ! walk - > nbytes )
return ret ;
2014-01-16 16:01:11 +01:00
if ( spin_trylock ( & ctrblk_lock ) )
ctrptr = ctrblk ;
memcpy ( ctrptr , walk - > iv , AES_BLOCK_SIZE ) ;
2011-05-04 15:09:44 +10:00
while ( ( nbytes = walk - > nbytes ) > = AES_BLOCK_SIZE ) {
out = walk - > dst . virt . addr ;
in = walk - > src . virt . addr ;
while ( nbytes > = AES_BLOCK_SIZE ) {
2014-01-16 16:01:11 +01:00
if ( ctrptr = = ctrblk )
n = __ctrblk_init ( ctrptr , nbytes ) ;
else
n = AES_BLOCK_SIZE ;
2016-03-17 15:22:12 +01:00
ret = cpacf_kmctr ( func , sctx - > key , out , in , n , ctrptr ) ;
2014-01-16 16:01:11 +01:00
if ( ret < 0 | | ret ! = n ) {
if ( ctrptr = = ctrblk )
spin_unlock ( & ctrblk_lock ) ;
2012-10-26 15:06:12 +02:00
return - EIO ;
2014-01-16 16:01:11 +01:00
}
2011-05-04 15:09:44 +10:00
if ( n > AES_BLOCK_SIZE )
2014-01-16 16:01:11 +01:00
memcpy ( ctrptr , ctrptr + n - AES_BLOCK_SIZE ,
2011-05-04 15:09:44 +10:00
AES_BLOCK_SIZE ) ;
2014-01-16 16:01:11 +01:00
crypto_inc ( ctrptr , AES_BLOCK_SIZE ) ;
2011-05-04 15:09:44 +10:00
out + = n ;
in + = n ;
nbytes - = n ;
}
ret = blkcipher_walk_done ( desc , walk , nbytes ) ;
}
2014-01-16 16:01:11 +01:00
if ( ctrptr = = ctrblk ) {
if ( nbytes )
memcpy ( ctrbuf , ctrptr , AES_BLOCK_SIZE ) ;
else
memcpy ( walk - > iv , ctrptr , AES_BLOCK_SIZE ) ;
spin_unlock ( & ctrblk_lock ) ;
2014-05-07 16:51:29 +02:00
} else {
if ( ! nbytes )
memcpy ( walk - > iv , ctrptr , AES_BLOCK_SIZE ) ;
2014-01-16 16:01:11 +01:00
}
2011-05-04 15:09:44 +10:00
/*
* final block may be < AES_BLOCK_SIZE , copy only nbytes
*/
if ( nbytes ) {
out = walk - > dst . virt . addr ;
in = walk - > src . virt . addr ;
2016-03-17 15:22:12 +01:00
ret = cpacf_kmctr ( func , sctx - > key , buf , in ,
AES_BLOCK_SIZE , ctrbuf ) ;
2012-10-26 15:06:12 +02:00
if ( ret < 0 | | ret ! = AES_BLOCK_SIZE )
return - EIO ;
2011-05-04 15:09:44 +10:00
memcpy ( out , buf , nbytes ) ;
2014-01-16 16:01:11 +01:00
crypto_inc ( ctrbuf , AES_BLOCK_SIZE ) ;
2011-05-04 15:09:44 +10:00
ret = blkcipher_walk_done ( desc , walk , 0 ) ;
2014-01-16 16:01:11 +01:00
memcpy ( walk - > iv , ctrbuf , AES_BLOCK_SIZE ) ;
2011-05-04 15:09:44 +10:00
}
2014-01-16 16:01:11 +01:00
2011-05-04 15:09:44 +10:00
return ret ;
}
static int ctr_aes_encrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_aes_ctx * sctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct blkcipher_walk walk ;
blkcipher_walk_init ( & walk , dst , src , nbytes ) ;
return ctr_aes_crypt ( desc , sctx - > enc , sctx , & walk ) ;
}
static int ctr_aes_decrypt ( struct blkcipher_desc * desc ,
struct scatterlist * dst , struct scatterlist * src ,
unsigned int nbytes )
{
struct s390_aes_ctx * sctx = crypto_blkcipher_ctx ( desc - > tfm ) ;
struct blkcipher_walk walk ;
blkcipher_walk_init ( & walk , dst , src , nbytes ) ;
return ctr_aes_crypt ( desc , sctx - > dec , sctx , & walk ) ;
}
static struct crypto_alg ctr_aes_alg = {
. cra_name = " ctr(aes) " ,
. cra_driver_name = " ctr-aes-s390 " ,
2016-03-17 15:22:12 +01:00
. cra_priority = 400 , /* combo: aes + ctr */
2011-05-04 15:09:44 +10:00
. cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER ,
. cra_blocksize = 1 ,
. cra_ctxsize = sizeof ( struct s390_aes_ctx ) ,
. cra_type = & crypto_blkcipher_type ,
. cra_module = THIS_MODULE ,
. cra_u = {
. blkcipher = {
. min_keysize = AES_MIN_KEY_SIZE ,
. max_keysize = AES_MAX_KEY_SIZE ,
. ivsize = AES_BLOCK_SIZE ,
. setkey = ctr_aes_set_key ,
. encrypt = ctr_aes_encrypt ,
. decrypt = ctr_aes_decrypt ,
}
}
} ;
2013-10-15 11:24:07 +02:00
static int ctr_aes_alg_reg ;
2008-04-17 07:46:17 +02:00
static int __init aes_s390_init ( void )
2006-01-06 00:19:18 -08:00
{
int ret ;
2016-03-17 15:22:12 +01:00
if ( cpacf_query ( CPACF_KM , CPACF_KM_AES_128_ENC ) )
2007-02-05 21:18:14 +01:00
keylen_flag | = AES_KEYLEN_128 ;
2016-03-17 15:22:12 +01:00
if ( cpacf_query ( CPACF_KM , CPACF_KM_AES_192_ENC ) )
2007-02-05 21:18:14 +01:00
keylen_flag | = AES_KEYLEN_192 ;
2016-03-17 15:22:12 +01:00
if ( cpacf_query ( CPACF_KM , CPACF_KM_AES_256_ENC ) )
2007-02-05 21:18:14 +01:00
keylen_flag | = AES_KEYLEN_256 ;
if ( ! keylen_flag )
return - EOPNOTSUPP ;
2006-01-06 00:19:18 -08:00
2007-02-05 21:18:14 +01:00
/* z9 109 and z9 BC/EC only support 128 bit key length */
2007-12-01 12:47:37 +11:00
if ( keylen_flag = = AES_KEYLEN_128 )
2008-12-25 13:39:37 +01:00
pr_info ( " AES hardware acceleration is only available for "
" 128-bit keys \n " ) ;
2006-01-06 00:19:18 -08:00
ret = crypto_register_alg ( & aes_alg ) ;
2007-02-05 21:18:14 +01:00
if ( ret )
2006-08-21 21:39:24 +10:00
goto aes_err ;
ret = crypto_register_alg ( & ecb_aes_alg ) ;
2007-02-05 21:18:14 +01:00
if ( ret )
2006-08-21 21:39:24 +10:00
goto ecb_aes_err ;
ret = crypto_register_alg ( & cbc_aes_alg ) ;
2007-02-05 21:18:14 +01:00
if ( ret )
2006-08-21 21:39:24 +10:00
goto cbc_aes_err ;
2016-03-17 15:22:12 +01:00
if ( cpacf_query ( CPACF_KM , CPACF_KM_XTS_128_ENC ) & &
cpacf_query ( CPACF_KM , CPACF_KM_XTS_256_ENC ) ) {
2011-04-26 16:12:42 +10:00
ret = crypto_register_alg ( & xts_aes_alg ) ;
if ( ret )
goto xts_aes_err ;
2013-10-15 11:24:07 +02:00
xts_aes_alg_reg = 1 ;
2011-04-26 16:12:42 +10:00
}
2016-03-17 15:22:12 +01:00
if ( cpacf_query ( CPACF_KMCTR , CPACF_KMCTR_AES_128_ENC ) & &
cpacf_query ( CPACF_KMCTR , CPACF_KMCTR_AES_192_ENC ) & &
cpacf_query ( CPACF_KMCTR , CPACF_KMCTR_AES_256_ENC ) ) {
2011-05-04 15:09:44 +10:00
ctrblk = ( u8 * ) __get_free_page ( GFP_KERNEL ) ;
if ( ! ctrblk ) {
ret = - ENOMEM ;
goto ctr_aes_err ;
}
ret = crypto_register_alg ( & ctr_aes_alg ) ;
if ( ret ) {
free_page ( ( unsigned long ) ctrblk ) ;
goto ctr_aes_err ;
}
2013-10-15 11:24:07 +02:00
ctr_aes_alg_reg = 1 ;
2011-05-04 15:09:44 +10:00
}
2006-08-21 21:39:24 +10:00
out :
2006-01-06 00:19:18 -08:00
return ret ;
2006-08-21 21:39:24 +10:00
2011-05-04 15:09:44 +10:00
ctr_aes_err :
crypto_unregister_alg ( & xts_aes_alg ) ;
2011-04-26 16:12:42 +10:00
xts_aes_err :
crypto_unregister_alg ( & cbc_aes_alg ) ;
2006-08-21 21:39:24 +10:00
cbc_aes_err :
crypto_unregister_alg ( & ecb_aes_alg ) ;
ecb_aes_err :
crypto_unregister_alg ( & aes_alg ) ;
aes_err :
goto out ;
2006-01-06 00:19:18 -08:00
}
2008-04-17 07:46:17 +02:00
static void __exit aes_s390_fini ( void )
2006-01-06 00:19:18 -08:00
{
2013-10-15 11:24:07 +02:00
if ( ctr_aes_alg_reg ) {
crypto_unregister_alg ( & ctr_aes_alg ) ;
free_page ( ( unsigned long ) ctrblk ) ;
}
if ( xts_aes_alg_reg )
crypto_unregister_alg ( & xts_aes_alg ) ;
2006-08-21 21:39:24 +10:00
crypto_unregister_alg ( & cbc_aes_alg ) ;
crypto_unregister_alg ( & ecb_aes_alg ) ;
2006-01-06 00:19:18 -08:00
crypto_unregister_alg ( & aes_alg ) ;
}
2015-02-19 17:34:07 +01:00
module_cpu_feature_match ( MSA , aes_s390_init ) ;
2008-04-17 07:46:17 +02:00
module_exit ( aes_s390_fini ) ;
2006-01-06 00:19:18 -08:00
2014-11-20 17:05:53 -08:00
MODULE_ALIAS_CRYPTO ( " aes-all " ) ;
2006-01-06 00:19:18 -08:00
MODULE_DESCRIPTION ( " Rijndael (AES) Cipher Algorithm " ) ;
MODULE_LICENSE ( " GPL " ) ;