// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm with protected keys.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2017, 2019
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 */

#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>
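
/*
 * Note on usage (a sketch, not part of the driver itself): unlike the
 * plain "aes" algorithms, the "paes" ciphers registered below take a
 * secure/protected key blob instead of a clear AES key. A kernel
 * consumer would, hypothetically, do something like
 *
 *	tfm = crypto_alloc_skcipher("cbc(paes)", 0, 0);
 *	ret = crypto_skcipher_setkey(tfm, key_blob, key_blob_len);
 *
 * where key_blob is a blob obtained through the pkey interface,
 * never clear key material.
 */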

/*
 * Key blobs smaller/bigger than these defines are rejected
 * by the common code even before the individual setkey function
 * is called. As paes can handle different kinds of key blobs
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE 64
#define PAES_MAX_KEYSIZE 256

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

struct key_blob {
	/*
	 * Small keys are stored in the keybuf. Larger keys are
	 * stored in extra allocated memory. In both cases, key
	 * points to the memory where the key material is kept.
	 * The code distinguishes the two cases by checking keylen
	 * against sizeof(keybuf). See the two following helper
	 * functions.
	 */
	u8 *key;
	u8 keybuf[128];
	unsigned int keylen;
};

static inline int _copy_key_to_kb(struct key_blob *kb,
				  const u8 *key,
				  unsigned int keylen)
{
	if (keylen <= sizeof(kb->keybuf))
		kb->key = kb->keybuf;
	else {
		kb->key = kmalloc(keylen, GFP_KERNEL);
		if (!kb->key)
			return -ENOMEM;
	}
	memcpy(kb->key, key, keylen);
	kb->keylen = keylen;

	return 0;
}

static inline void _free_kb_keybuf(struct key_blob *kb)
{
	if (kb->key && kb->key != kb->keybuf
	    && kb->keylen > sizeof(kb->keybuf)) {
		kfree(kb->key);
		kb->key = NULL;
	}
}

struct s390_paes_ctx {
	struct key_blob kb;
	struct pkey_protkey pk;
	unsigned long fc;
};

struct s390_pxts_ctx {
	struct key_blob kb[2];
	struct pkey_protkey pk[2];
	unsigned long fc;
};
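
/*
 * Convert the stored key blob into a CPACF protected key via the pkey
 * layer. The conversion is retried a few times, presumably to ride out
 * transient failures in the underlying key conversion service.
 */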
static inline int __paes_convert_key(struct key_blob *kb,
				     struct pkey_protkey *pk)
{
	int i, ret;

	/* try three times in case of failure */
	for (i = 0; i < 3; i++) {
		ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk);
		if (ret == 0)
			break;
	}

	return ret;
}
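
/*
 * Derive the CPACF function code from the protected key type and cache
 * it in the context. A function code of zero means the required
 * facility is not available on this machine, so setkey must fail.
 */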
static int __paes_set_key(struct s390_paes_ctx *ctx)
{
	unsigned long fc;

	if (__paes_convert_key(&ctx->kb, &ctx->pk))
		return -EINVAL;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ecb_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;

	return 0;
}

static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	int rc;
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
	rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	if (__paes_set_key(ctx)) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return 0;
}
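
/*
 * A short return from cpacf_km (k < n) indicates that the protected
 * key has become unusable, e.g. because the wrapping key changed; the
 * key is then re-derived from the stored blob and the remaining bytes
 * are processed in the next loop iteration. (The wrapping-key cause is
 * an assumption; the code itself only reacts to the short return.)
 */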
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int ret;

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			ret = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__paes_set_key(ctx) != 0)
				return skcipher_walk_done(&walk, -EIO);
		}
	}
	return ret;
}

static int ecb_paes_encrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, 0);
}

static int ecb_paes_decrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg ecb_paes_alg = {
	.base.cra_name		= "ecb(paes)",
	.base.cra_driver_name	= "ecb-paes-s390",
	.base.cra_priority	= 401,	/* combo: aes + ecb + 1 */
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_paes_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_list		= LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
	.init			= ecb_paes_init,
	.exit			= ecb_paes_exit,
	.min_keysize		= PAES_MIN_KEYSIZE,
	.max_keysize		= PAES_MAX_KEYSIZE,
	.setkey			= ecb_paes_set_key,
	.encrypt		= ecb_paes_encrypt,
	.decrypt		= ecb_paes_decrypt,
};

static int cbc_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;

	return 0;
}

static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
	unsigned long fc;

	if (__paes_convert_key(&ctx->kb, &ctx->pk))
		return -EINVAL;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	int rc;
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
	rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	if (__cbc_paes_set_key(ctx)) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return 0;
}
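
/*
 * The KMC instruction takes a parameter block containing the chaining
 * value (IV) followed by the protected key; the instruction updates
 * the IV slot, which is copied back into the walk after each chunk.
 */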
static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[MAXPROTKEYSIZE];
	} param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_kmc(ctx->fc | modifier, &param,
			      walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k) {
			memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
			ret = skcipher_walk_done(&walk, nbytes - k);
		}
		if (k < n) {
			if (__cbc_paes_set_key(ctx) != 0)
				return skcipher_walk_done(&walk, -EIO);
			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
		}
	}
	return ret;
}

static int cbc_paes_encrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, 0);
}

static int cbc_paes_decrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_paes_alg = {
	.base.cra_name		= "cbc(paes)",
	.base.cra_driver_name	= "cbc-paes-s390",
	.base.cra_priority	= 402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_paes_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_list		= LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
	.init			= cbc_paes_init,
	.exit			= cbc_paes_exit,
	.min_keysize		= PAES_MIN_KEYSIZE,
	.max_keysize		= PAES_MAX_KEYSIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= cbc_paes_set_key,
	.encrypt		= cbc_paes_encrypt,
	.decrypt		= cbc_paes_decrypt,
};

static int xts_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb[0].key = NULL;
	ctx->kb[1].key = NULL;

	return 0;
}

static void xts_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb[0]);
	_free_kb_keybuf(&ctx->kb[1]);
}

static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
	unsigned long fc;

	if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) ||
	    __paes_convert_key(&ctx->kb[1], &ctx->pk[1]))
		return -EINVAL;

	if (ctx->pk[0].type != ctx->pk[1].type)
		return -EINVAL;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
		(ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
		CPACF_KM_PXTS_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}
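
/*
 * XTS needs two keys, so the incoming blob is expected to hold two key
 * blobs of equal size back to back; each half is copied into its own
 * key_blob and converted separately.
 */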
static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int xts_key_len)
{
	int rc;
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 ckey[2 * AES_MAX_KEY_SIZE];
	unsigned int ckey_len, key_len;

	if (xts_key_len % 2)
		return -EINVAL;

	key_len = xts_key_len / 2;

	_free_kb_keybuf(&ctx->kb[0]);
	_free_kb_keybuf(&ctx->kb[1]);
	rc = _copy_key_to_kb(&ctx->kb[0], in_key, key_len);
	if (rc)
		return rc;
	rc = _copy_key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
	if (rc)
		return rc;

	if (__xts_paes_set_key(ctx)) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/*
	 * xts_verify_key verifies the key length is not odd and makes
	 * sure that the two keys are not the same. This can be done
	 * on the two protected keys as well.
	 */
	ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
		AES_KEYSIZE_128 : AES_KEYSIZE_256;
	memcpy(ckey, ctx->pk[0].protkey, ckey_len);
	memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);

	return xts_verify_key(tfm, ckey, 2 * ckey_len);
}
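
/*
 * The initial tweak is computed by the PCC instruction from the IV and
 * the second protected key; the result (pcc_param.xts) then seeds the
 * XTS parameter block used with KM for the actual data.
 */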
static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int keylen, offset, nbytes, n, k;
	int ret;
	struct {
		u8 key[MAXPROTKEYSIZE];	/* key + verification pattern */
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[MAXPROTKEYSIZE];	/* key + verification pattern */
		u8 init[16];
	} xts_param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
retry:
	memset(&pcc_param, 0, sizeof(pcc_param));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
	cpacf_pcc(ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			ret = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__xts_paes_set_key(ctx) != 0)
				return skcipher_walk_done(&walk, -EIO);
			goto retry;
		}
	}
	return ret;
}

static int xts_paes_encrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, 0);
}

static int xts_paes_decrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg xts_paes_alg = {
	.base.cra_name		= "xts(paes)",
	.base.cra_driver_name	= "xts-paes-s390",
	.base.cra_priority	= 402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_pxts_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_list		= LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
	.init			= xts_paes_init,
	.exit			= xts_paes_exit,
	.min_keysize		= 2 * PAES_MIN_KEYSIZE,
	.max_keysize		= 2 * PAES_MAX_KEYSIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= xts_paes_set_key,
	.encrypt		= xts_paes_encrypt,
	.decrypt		= xts_paes_decrypt,
};

static int ctr_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;

	return 0;
}

static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
	unsigned long fc;

	if (__paes_convert_key(&ctx->kb, &ctx->pk))
		return -EINVAL;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
		CPACF_KMCTR_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	int rc;
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
	rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	if (__ctr_paes_set_key(ctx)) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return 0;
}
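
/*
 * Fill a buffer (at most one page) with a sequence of incrementing
 * counter blocks derived from iv, so that a single KMCTR call can
 * process many blocks at once.
 */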
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}
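
/*
 * The page-sized ctrblk buffer is shared between all tfms and guarded
 * by ctrblk_lock. If the lock cannot be taken, the code falls back to
 * processing one block at a time with walk.iv as the counter.
 */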
static int ctr_paes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2 * AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr,
				walk.src.virt.addr, n, ctrptr);
		if (k) {
			if (ctrptr == ctrblk)
				memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(walk.iv, AES_BLOCK_SIZE);
			ret = skcipher_walk_done(&walk, nbytes - n);
		}
		if (k < n) {
			if (__ctr_paes_set_key(ctx) != 0) {
				if (locked)
					spin_unlock(&ctrblk_lock);
				return skcipher_walk_done(&walk, -EIO);
			}
		}
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		while (1) {
			if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf,
					walk.src.virt.addr, AES_BLOCK_SIZE,
					walk.iv) == AES_BLOCK_SIZE)
				break;
			if (__ctr_paes_set_key(ctx) != 0)
				return skcipher_walk_done(&walk, -EIO);
		}
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_paes_alg = {
	.base.cra_name		= "ctr(paes)",
	.base.cra_driver_name	= "ctr-paes-s390",
	.base.cra_priority	= 402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct s390_paes_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_list		= LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
	.init			= ctr_paes_init,
	.exit			= ctr_paes_exit,
	.min_keysize		= PAES_MIN_KEYSIZE,
	.max_keysize		= PAES_MAX_KEYSIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ctr_paes_set_key,
	.encrypt		= ctr_paes_crypt,
	.decrypt		= ctr_paes_crypt,
	.chunksize		= AES_BLOCK_SIZE,
};
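
/*
 * An algorithm that was never registered still has the empty list head
 * it was initialized with (LIST_HEAD_INIT above), so list_empty()
 * distinguishes registered from never-registered algorithms and lets
 * the error path of the init function call this unconditionally.
 */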
static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	if (!list_empty(&alg->base.cra_list))
		crypto_unregister_skcipher(alg);
}

static void paes_s390_fini(void)
{
	if (ctrblk)
		free_page((unsigned long) ctrblk);
	__crypto_unregister_skcipher(&ctr_paes_alg);
	__crypto_unregister_skcipher(&xts_paes_alg);
	__crypto_unregister_skcipher(&cbc_paes_alg);
	__crypto_unregister_skcipher(&ecb_paes_alg);
}
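
/*
 * Each mode is registered only if the machine reports at least one of
 * the corresponding protected-key CPACF function codes; the CTR page
 * buffer is allocated only when the CTR algorithm is usable.
 */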
static int __init paes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC and KMCTR */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
		ret = crypto_register_skcipher(&ecb_paes_alg);
		if (ret)
			goto out_err;
	}
	if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
		ret = crypto_register_skcipher(&cbc_paes_alg);
		if (ret)
			goto out_err;
	}
	if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
		ret = crypto_register_skcipher(&xts_paes_alg);
		if (ret)
			goto out_err;
	}
	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
		ret = crypto_register_skcipher(&ctr_paes_alg);
		if (ret)
			goto out_err;
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
	}
	return 0;
out_err:
	paes_s390_fini();
	return ret;
}

module_init(paes_s390_init);
module_exit(paes_s390_fini);

MODULE_ALIAS_CRYPTO("paes");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");