/**
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

struct xcbc_state {
        u8 state[AES_BLOCK_SIZE];
        unsigned int count;
        u8 buffer[AES_BLOCK_SIZE];
};
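
/*
 * The NX XCBC unit only supports AES-128, so any other key length is
 * rejected here. The key is stashed in the private ctx and is not
 * programmed into the coprocessor block (CPB) until init time.
 */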
static int nx_xcbc_set_key(struct crypto_shash *desc,
                           const u8            *in_key,
                           unsigned int         key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);

        switch (key_len) {
        case AES_KEYSIZE_128:
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        default:
                return -EINVAL;
        }

        memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);

        return 0;
}

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag  = E[1]
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
        u8 keys[2][AES_BLOCK_SIZE];
        u8 key[32];
        int rc = 0;
        int len;

        /* Change to ECB mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
        memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
        memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
        NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

        /* K1 and K3 base patterns */
        memset(keys[0], 0x01, sizeof(keys[0]));
        memset(keys[1], 0x03, sizeof(keys[1]));

        len = sizeof(keys);

        /* Generate K1 and K3 encrypting the patterns */
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
                                 nx_ctx->ap->sglen);
        if (len != sizeof(keys)) {
                /* bail via out: so the XCBC mode and key are restored */
                rc = -EINVAL;
                goto out;
        }

        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
                                  nx_ctx->ap->sglen);
        if (len != sizeof(keys)) {
                rc = -EINVAL;
                goto out;
        }
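
        /*
         * As elsewhere in this driver, the lengths are computed as
         * (start - end) and so come out negative: a negative length
         * tells PHYP that the operands are scatter/gather lists rather
         * than linear buffers.
         */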
        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

        /* XOR K3 with the padding for a 0 length message */
        keys[1][0] ^= 0x80;

        len = sizeof(keys[1]);

        /* Encrypt the final result */
        memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
                                 nx_ctx->ap->sglen);
        if (len != sizeof(keys[1])) {
                rc = -EINVAL;
                goto out;
        }

        len = AES_BLOCK_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                  nx_ctx->ap->sglen);
        if (len != AES_BLOCK_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

out:
        /* Restore XCBC mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
        memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

        return rc;
}
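
/*
 * Program the CPB with the AES-128 key saved by ->setkey(), wipe the
 * cached copy, and describe the 16-byte state buffer that receives the
 * MAC output.
 */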
static int nx_xcbc_init(struct shash_desc *desc)
{
        struct xcbc_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *out_sg;
        int len;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        memset(sctx, 0, sizeof *sctx);

        NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
        csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

        memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
        /* wipe the whole cached key, not just its first byte */
        memset(nx_ctx->priv.xcbc.key, 0, sizeof(nx_ctx->priv.xcbc.key));

        len = AES_BLOCK_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
                                  &len, nx_ctx->ap->sglen);

        if (len != AES_BLOCK_SIZE)
                return -EINVAL;

        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        return 0;
}
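
/*
 * Feed full blocks to the coprocessor, but always hold back at least the
 * final block: the hardware rejects zero-byte operations and the MAC must
 * be finalized, so ->final() needs data left to finish with. Anything
 * short of a block boundary is buffered in the descriptor context.
 */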
static int nx_xcbc_update(struct shash_desc *desc,
                          const u8          *data,
                          unsigned int       len)
{
        struct xcbc_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg;
        u32 to_process = 0, leftover, total;
        unsigned int max_sg_len;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        total = sctx->count + len;

        /* 2 cases for total data len:
         *  1: <= AES_BLOCK_SIZE: copy into state, return 0
         *  2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
         */
        if (total <= AES_BLOCK_SIZE) {
                memcpy(sctx->buffer + sctx->count, data, len);
                sctx->count += len;
                goto out;
        }

        in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        do {
                to_process = total - to_process;
                to_process = to_process & ~(AES_BLOCK_SIZE - 1);

                leftover = total - to_process;

                /* the hardware will not accept a 0 byte operation for this
                 * algorithm and the operation MUST be finalized to be correct.
                 * So if we happen to get an update that falls on a block sized
                 * boundary, we must save off the last block to finalize with
                 * later. */
                if (!leftover) {
                        to_process -= AES_BLOCK_SIZE;
                        leftover = AES_BLOCK_SIZE;
                }

                if (sctx->count) {
                        data_len = sctx->count;
                        in_sg = nx_build_sg_list(nx_ctx->in_sg,
                                                 (u8 *) sctx->buffer,
                                                 &data_len,
                                                 max_sg_len);
                        if (data_len != sctx->count) {
                                /* don't return with nx_ctx->lock held */
                                rc = -EINVAL;
                                goto out;
                        }
                }

                data_len = to_process - sctx->count;
                in_sg = nx_build_sg_list(in_sg,
                                         (u8 *) data,
                                         &data_len,
                                         max_sg_len);
                if (data_len != to_process - sctx->count) {
                        rc = -EINVAL;
                        goto out;
                }

                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
                                        sizeof(struct nx_sg);

                /* we've hit the nx chip previously and we're updating again,
                 * so copy over the partial digest */
                if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                        memcpy(csbcpb->cpb.aes_xcbc.cv,
                               csbcpb->cpb.aes_xcbc.out_cv_mac,
                               AES_BLOCK_SIZE);
                }

                NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
                        rc = -EINVAL;
                        goto out;
                }

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                atomic_inc(&(nx_ctx->stats->aes_ops));

                /* everything after the first update is continuation */
                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                total -= to_process;
                data += to_process - sctx->count;
                sctx->count = 0;
                in_sg = nx_ctx->in_sg;
        } while (leftover > AES_BLOCK_SIZE);

        /* copy the leftover back into the state struct */
        memcpy(sctx->buffer, data, leftover);
        sctx->count = leftover;

out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}
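
/*
 * Finalize the MAC: hand any buffered tail to the coprocessor with the
 * INTERMEDIATE flag cleared, or, if no data was ever seen, fall back to
 * nx_xcbc_empty() to produce the zero-length tag.
 */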
static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
        struct xcbc_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
        unsigned long irq_flags;
        int rc = 0;
        int len;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
                memcpy(csbcpb->cpb.aes_xcbc.cv,
                       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
        } else if (sctx->count == 0) {
                /*
                 * we've never seen an update, so this is a 0 byte op. The
                 * hardware cannot handle a 0 byte op, so just ECB to
                 * generate the hash.
                 */
                rc = nx_xcbc_empty(desc, out);
                goto out;
        }

        /* final is represented by continuing the operation and indicating that
         * this is not an intermediate operation */
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

        len = sctx->count;
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
                                 &len, nx_ctx->ap->sglen);
        if (len != sctx->count) {
                /* don't return with nx_ctx->lock held */
                rc = -EINVAL;
                goto out;
        }

        len = AES_BLOCK_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                  nx_ctx->ap->sglen);
        if (len != AES_BLOCK_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
                goto out;
        }

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;

        atomic_inc(&(nx_ctx->stats->aes_ops));

        memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}
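
/*
 * A cra_priority of 300 is meant to place this implementation above the
 * software xcbc template so it is preferred whenever the NX hardware is
 * present.
 */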
struct shash_alg nx_shash_aes_xcbc_alg = {
        .digestsize = AES_BLOCK_SIZE,
        .init       = nx_xcbc_init,
        .update     = nx_xcbc_update,
        .final      = nx_xcbc_final,
        .setkey     = nx_xcbc_set_key,
        .descsize   = sizeof(struct xcbc_state),
        .statesize  = sizeof(struct xcbc_state),
        .base       = {
                .cra_name        = "xcbc(aes)",
                .cra_driver_name = "xcbc-aes-nx",
                .cra_priority    = 300,
                .cra_flags       = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize   = AES_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_init        = nx_crypto_ctx_aes_xcbc_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
};