// SPDX-License-Identifier: GPL-2.0
/*
 * K3 SA2UL crypto accelerator driver
 *
 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors:	Keerthy
 *		Vitaly Andrianov
 *		Tero Kristo
 */
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "sa2ul.h"
/* Byte offset for key in encryption security context */
#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
/* Byte offset for Aux-1 in encryption security context */
#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)

#define SA_CMDL_UPD_ENC         0x0001
#define SA_CMDL_UPD_AUTH        0x0002
#define SA_CMDL_UPD_ENC_IV      0x0004
#define SA_CMDL_UPD_AUTH_IV     0x0008
#define SA_CMDL_UPD_AUX_KEY     0x0010

#define SA_AUTH_SUBKEY_LEN	16
#define SA_CMDL_PAYLOAD_LENGTH_MASK	0xFFFF
#define SA_CMDL_SOP_BYPASS_LEN_MASK	0xFF000000

#define MODE_CONTROL_BYTES	27
#define SA_HASH_PROCESSING	0
#define SA_CRYPTO_PROCESSING	0
#define SA_UPLOAD_HASH_TO_TLR	BIT(6)

#define SA_SW0_FLAGS_MASK	0xF0000
#define SA_SW0_CMDL_INFO_MASK	0x1F00000
#define SA_SW0_CMDL_PRESENT	BIT(4)
#define SA_SW0_ENG_ID_MASK	0x3E000000
#define SA_SW0_DEST_INFO_PRESENT	BIT(30)
#define SA_SW2_EGRESS_LENGTH		0xFF000000
#define SA_BASIC_HASH		0x10

#define SHA256_DIGEST_WORDS    8
/* Make 32-bit word from 4 bytes */
#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
				   ((b2) << 8) | (b3))
/* size of SCCTL structure in bytes */
#define SA_SCCTL_SZ 16

/* Max Authentication tag size */
#define SA_MAX_AUTH_TAG_SZ 64

#define PRIV_ID	0x1
#define PRIV	0x1
static struct device *sa_k3_dev;
/**
 * struct sa_cmdl_cfg - Command label configuration descriptor
 * @aalg: authentication algorithm ID
 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
 * @auth_eng_id: Authentication Engine ID
 * @iv_size: Initialization Vector size
 * @akey: Authentication key
 * @akey_len: Authentication key length
 * @enc: True, if this is an encode request
 */
struct sa_cmdl_cfg {
	int aalg;
	u8 enc_eng_id;
	u8 auth_eng_id;
	u8 iv_size;
	const u8 *akey;
	u16 akey_len;
	bool enc;
};
/**
 * struct algo_data - Crypto algorithm specific data
 * @enc_eng: Encryption engine info structure
 * @auth_eng: Authentication engine info structure
 * @auth_ctrl: Authentication control word
 * @hash_size: Size of digest
 * @iv_idx: iv index in psdata
 * @iv_out_size: iv out size
 * @ealg_id: Encryption Algorithm ID
 * @aalg_id: Authentication algorithm ID
 * @mci_enc: Mode Control Instruction for Encryption algorithm
 * @mci_dec: Mode Control Instruction for Decryption
 * @inv_key: Whether the encryption algorithm demands key inversion
 * @ctx: Pointer to the algorithm context
 * @keyed_mac: Whether the authentication algorithm has key
 * @prep_iopad: Function pointer to generate intermediate ipad/opad
 */
struct algo_data {
	struct sa_eng_info enc_eng;
	struct sa_eng_info auth_eng;
	u8 auth_ctrl;
	u8 hash_size;
	u8 iv_idx;
	u8 iv_out_size;
	u8 ealg_id;
	u8 aalg_id;
	u8 *mci_enc;
	u8 *mci_dec;
	bool inv_key;
	struct sa_tfm_ctx *ctx;
	bool keyed_mac;
	void (*prep_iopad)(struct algo_data *algo, const u8 *key,
			   u16 key_sz, __be32 *ipad, __be32 *opad);
};
/**
 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
 * @type: Type of the crypto algorithm.
 * @alg: Union of crypto algorithm definitions.
 * @registered: Flag indicating if the crypto algorithm is already registered
 */
struct sa_alg_tmpl {
	u32 type;		/* CRYPTO_ALG_TYPE from <linux/crypto.h> */
	union {
		struct skcipher_alg skcipher;
		struct ahash_alg ahash;
		struct aead_alg aead;
	} alg;
	bool registered;
};
/**
 * struct sa_rx_data: RX Packet miscellaneous data place holder
 * @req: crypto request data pointer
 * @ddev: pointer to the DMA device
 * @tx_in: dma_async_tx_descriptor pointer for rx channel
 * @split_src_sg: Set if the src sg is split and needs to be freed up
 * @split_dst_sg: Set if the dst sg is split and needs to be freed up
 * @enc: Flag indicating either encryption or decryption
 * @enc_iv_size: Initialisation vector size
 * @iv_idx: Initialisation vector index
 * @rx_sg: Static scatterlist entry for overriding RX data
 * @tx_sg: Static scatterlist entry for overriding TX data
 * @src: Source data pointer
 * @dst: Destination data pointer
 */
struct sa_rx_data {
	void *req;
	struct device *ddev;
	struct dma_async_tx_descriptor *tx_in;
	struct scatterlist *split_src_sg;
	struct scatterlist *split_dst_sg;
	u8 enc;
	u8 enc_iv_size;
	u8 iv_idx;
	struct scatterlist rx_sg;
	struct scatterlist tx_sg;
	struct scatterlist *src;
	struct scatterlist *dst;
};
/**
 * struct sa_req: SA request definition
 * @dev: device for the request
 * @size: total data to be xmitted via DMA
 * @enc_offset: offset of cipher data
 * @enc_size: data to be passed to cipher engine
 * @enc_iv: cipher IV
 * @auth_offset: offset of the authentication data
 * @auth_size: size of the authentication data
 * @auth_iv: authentication IV
 * @type: algorithm type for the request
 * @cmdl: command label pointer
 * @base: pointer to the base request
 * @ctx: pointer to the algorithm context data
 * @enc: true if this is an encode request
 * @src: source data
 * @dst: destination data
 * @callback: DMA callback for the request
 * @mdata_size: metadata size passed to DMA
 */
struct sa_req {
	struct device *dev;
	u16 size;
	u8 enc_offset;
	u16 enc_size;
	u8 *enc_iv;
	u8 auth_offset;
	u16 auth_size;
	u8 *auth_iv;
	u32 type;
	u32 *cmdl;
	struct crypto_async_request *base;
	struct sa_tfm_ctx *ctx;
	bool enc;
	struct scatterlist *src;
	struct scatterlist *dst;
	dma_async_tx_callback callback;
	u16 mdata_size;
};
/*
* Mode Control Instructions for various Key lengths 128 , 192 , 256
* For CBC ( Cipher Block Chaining ) mode for encryption
*/
static u8 mci_cbc_enc_array [ 3 ] [ MODE_CONTROL_BYTES ] = {
{ 0x61 , 0x00 , 0x00 , 0x18 , 0x88 , 0x0a , 0xaa , 0x4b , 0x7e , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x61 , 0x00 , 0x00 , 0x18 , 0x88 , 0x4a , 0xaa , 0x4b , 0x7e , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x61 , 0x00 , 0x00 , 0x18 , 0x88 , 0x8a , 0xaa , 0x4b , 0x7e , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
} ;
/*
* Mode Control Instructions for various Key lengths 128 , 192 , 256
* For CBC ( Cipher Block Chaining ) mode for decryption
*/
static u8 mci_cbc_dec_array [ 3 ] [ MODE_CONTROL_BYTES ] = {
{ 0x71 , 0x00 , 0x00 , 0x80 , 0x8a , 0xca , 0x98 , 0xf4 , 0x40 , 0xc0 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x71 , 0x00 , 0x00 , 0x84 , 0x8a , 0xca , 0x98 , 0xf4 , 0x40 , 0xc0 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x71 , 0x00 , 0x00 , 0x88 , 0x8a , 0xca , 0x98 , 0xf4 , 0x40 , 0xc0 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
} ;
/*
* Mode Control Instructions for various Key lengths 128 , 192 , 256
* For CBC ( Cipher Block Chaining ) mode for encryption
*/
static u8 mci_cbc_enc_no_iv_array [ 3 ] [ MODE_CONTROL_BYTES ] = {
{ 0x21 , 0x00 , 0x00 , 0x18 , 0x88 , 0x0a , 0xaa , 0x4b , 0x7e , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x21 , 0x00 , 0x00 , 0x18 , 0x88 , 0x4a , 0xaa , 0x4b , 0x7e , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x21 , 0x00 , 0x00 , 0x18 , 0x88 , 0x8a , 0xaa , 0x4b , 0x7e , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
} ;
/*
* Mode Control Instructions for various Key lengths 128 , 192 , 256
* For CBC ( Cipher Block Chaining ) mode for decryption
*/
static u8 mci_cbc_dec_no_iv_array [ 3 ] [ MODE_CONTROL_BYTES ] = {
{ 0x31 , 0x00 , 0x00 , 0x80 , 0x8a , 0xca , 0x98 , 0xf4 , 0x40 , 0xc0 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x31 , 0x00 , 0x00 , 0x84 , 0x8a , 0xca , 0x98 , 0xf4 , 0x40 , 0xc0 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x31 , 0x00 , 0x00 , 0x88 , 0x8a , 0xca , 0x98 , 0xf4 , 0x40 , 0xc0 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
} ;
/*
* Mode Control Instructions for various Key lengths 128 , 192 , 256
* For ECB ( Electronic Code Book ) mode for encryption
*/
static u8 mci_ecb_enc_array [ 3 ] [ 27 ] = {
{ 0x21 , 0x00 , 0x00 , 0x80 , 0x8a , 0x04 , 0xb7 , 0x90 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x21 , 0x00 , 0x00 , 0x84 , 0x8a , 0x04 , 0xb7 , 0x90 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x21 , 0x00 , 0x00 , 0x88 , 0x8a , 0x04 , 0xb7 , 0x90 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
} ;
/*
* Mode Control Instructions for various Key lengths 128 , 192 , 256
* For ECB ( Electronic Code Book ) mode for decryption
*/
static u8 mci_ecb_dec_array [ 3 ] [ 27 ] = {
{ 0x31 , 0x00 , 0x00 , 0x80 , 0x8a , 0x04 , 0xb7 , 0x90 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x31 , 0x00 , 0x00 , 0x84 , 0x8a , 0x04 , 0xb7 , 0x90 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
{ 0x31 , 0x00 , 0x00 , 0x88 , 0x8a , 0x04 , 0xb7 , 0x90 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ,
} ;
/*
* Mode Control Instructions for DES algorithm
* For CBC ( Cipher Block Chaining ) mode and ECB mode
* encryption and for decryption respectively
*/
static u8 mci_cbc_3des_enc_array [ MODE_CONTROL_BYTES ] = {
0x60 , 0x00 , 0x00 , 0x18 , 0x88 , 0x52 , 0xaa , 0x4b , 0x7e , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 ,
} ;
static u8 mci_cbc_3des_dec_array [ MODE_CONTROL_BYTES ] = {
0x70 , 0x00 , 0x00 , 0x85 , 0x0a , 0xca , 0x98 , 0xf4 , 0x40 , 0xc0 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 ,
} ;
static u8 mci_ecb_3des_enc_array [ MODE_CONTROL_BYTES ] = {
0x20 , 0x00 , 0x00 , 0x85 , 0x0a , 0x04 , 0xb7 , 0x90 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 ,
} ;
static u8 mci_ecb_3des_dec_array [ MODE_CONTROL_BYTES ] = {
0x30 , 0x00 , 0x00 , 0x85 , 0x0a , 0x04 , 0xb7 , 0x90 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 ,
0x00 , 0x00 , 0x00 ,
} ;
/*
 * Perform 16 byte or 128 bit swizzling
 * The SA2UL Expects the security context to
 * be in little Endian and the bus width is 128 bits or 16 bytes
 * Hence swap 16 bytes at a time from higher to lower address
 */
static void sa_swiz_128(u8 *in, u16 len)
{
	u8 data[16];
	int i, j;

	for (i = 0; i < len; i += 16) {
		memcpy(data, &in[i], 16);
		for (j = 0; j < 16; j++)
			in[i + j] = data[15 - j];
	}
}
/* Prepare the ipad and opad from key as per SHA algorithm step 1 */
static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz)
{
	int i;

	for (i = 0; i < key_sz; i++) {
		k_ipad[i] = key[i] ^ 0x36;
		k_opad[i] = key[i] ^ 0x5c;
	}

	/* Instead of XOR with 0 */
	for (; i < SHA1_BLOCK_SIZE; i++) {
		k_ipad[i] = 0x36;
		k_opad[i] = 0x5c;
	}
}
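
/*
 * Export the current hash state from the software shash transform as
 * big-endian 32-bit words; used to capture the intermediate ipad/opad
 * digests for the security context. Only SHA1/SHA256 state is handled.
 */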
static void sa_export_shash(struct shash_desc *hash, int block_size,
			    int digest_size, __be32 *out)
{
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	} sha;
	void *state;
	u32 *result;
	int i;

	switch (digest_size) {
	case SHA1_DIGEST_SIZE:
		state = &sha.sha1;
		result = sha.sha1.state;
		break;
	case SHA256_DIGEST_SIZE:
		state = &sha.sha256;
		result = sha.sha256.state;
		break;
	default:
		dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
			digest_size);
		return;
	}

	crypto_shash_export(hash, state);

	for (i = 0; i < digest_size >> 2; i++)
		out[i] = cpu_to_be32(result[i]);
}
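
/*
 * Hash one block of (key XOR ipad) and one block of (key XOR opad) and
 * export the two intermediate states; these are later copied into the
 * security context as AuthKey and Aux-1 by sa_set_sc_auth().
 */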
static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
			      u16 key_sz, __be32 *ipad, __be32 *opad)
{
	SHASH_DESC_ON_STACK(shash, data->ctx->shash);
	int block_size = crypto_shash_blocksize(data->ctx->shash);
	int digest_size = crypto_shash_digestsize(data->ctx->shash);
	u8 k_ipad[SHA1_BLOCK_SIZE];
	u8 k_opad[SHA1_BLOCK_SIZE];

	shash->tfm = data->ctx->shash;

	prepare_kiopad(k_ipad, k_opad, key, key_sz);

	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);

	crypto_shash_init(shash);
	crypto_shash_update(shash, k_ipad, block_size);
	sa_export_shash(shash, block_size, digest_size, ipad);

	crypto_shash_init(shash);
	crypto_shash_update(shash, k_opad, block_size);
	sa_export_shash(shash, block_size, digest_size, opad);
}
/* Derive the inverse key used in AES-CBC decryption operation */
static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
{
	struct crypto_aes_ctx ctx;
	int key_pos;

	if (aes_expandkey(&ctx, key, key_sz)) {
		dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
		return -EINVAL;
	}

	/* work around to get the right inverse for AES_KEYSIZE_192 size keys */
	if (key_sz == AES_KEYSIZE_192) {
		ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
		ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
	}

	/* Based on crypto_aes_expand_key logic */
	switch (key_sz) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
		key_pos = key_sz + 24;
		break;

	case AES_KEYSIZE_256:
		key_pos = key_sz + 24 - 4;
		break;

	default:
		dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
		return -EINVAL;
	}

	memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
	return 0;
}
/* Set Security context for the encryption engine */
static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
			 u8 enc, u8 *sc_buf)
{
	const u8 *mci = NULL;

	/* Set Encryption mode selector to crypto processing */
	sc_buf[0] = SA_CRYPTO_PROCESSING;

	if (enc)
		mci = ad->mci_enc;
	else
		mci = ad->mci_dec;
	/* Set the mode control instructions in security context */
	if (mci)
		memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);

	/* For AES-CBC decryption get the inverse key */
	if (ad->inv_key && !enc) {
		if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
			return -EINVAL;
	/* For all other cases: key is used */
	} else {
		memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
	}

	return 0;
}
/* Set Security context for the authentication engine */
static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
			   u8 *sc_buf)
{
	__be32 ipad[64], opad[64];

	/* Set Authentication mode selector to hash processing */
	sc_buf[0] = SA_HASH_PROCESSING;
	/* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
	sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
	sc_buf[1] |= ad->auth_ctrl;

	/* Copy the keys or ipad/opad */
	if (ad->keyed_mac) {
		ad->prep_iopad(ad, key, key_sz, ipad, opad);

		/* Copy ipad to AuthKey */
		memcpy(&sc_buf[32], ipad, ad->hash_size);
		/* Copy opad to Aux-1 */
		memcpy(&sc_buf[64], opad, ad->hash_size);
	} else {
		/* basic hash */
		sc_buf[1] |= SA_BASIC_HASH;
	}
}
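
/*
 * Copy the IV into the command label as big-endian 32-bit words:
 * four words when size16 is set, two otherwise.
 */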
static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
{
	int j;

	for (j = 0; j < ((size16) ? 4 : 2); j++) {
		*out = cpu_to_be32(*((u32 *)iv));
		iv += 4;
		out++;
	}
}
/* Format general command label */
static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
			      struct sa_cmdl_upd_info *upd_info)
{
	u8 enc_offset = 0, auth_offset = 0, total = 0;
	u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
	u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
	u32 *word_ptr = (u32 *)cmdl;
	int i;

	/* Clear the command label */
	memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));

	/* Initialize the command update structure */
	memzero_explicit(upd_info, sizeof(*upd_info));

	if (cfg->enc_eng_id && cfg->auth_eng_id) {
		if (cfg->enc) {
			auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
			enc_next_eng = cfg->auth_eng_id;

			if (cfg->iv_size)
				auth_offset += cfg->iv_size;
		} else {
			enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
			auth_next_eng = cfg->enc_eng_id;
		}
	}

	if (cfg->enc_eng_id) {
		upd_info->flags |= SA_CMDL_UPD_ENC;
		upd_info->enc_size.index = enc_offset >> 2;
		upd_info->enc_offset.index = upd_info->enc_size.index + 1;
		/* Encryption command label */
		cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;

		/* Encryption modes requiring IV */
		if (cfg->iv_size) {
			upd_info->flags |= SA_CMDL_UPD_ENC_IV;
			upd_info->enc_iv.index =
				(enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
			upd_info->enc_iv.size = cfg->iv_size;

			cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
				SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;

			cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
				(SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
			total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
		} else {
			cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
				SA_CMDL_HEADER_SIZE_BYTES;
			total += SA_CMDL_HEADER_SIZE_BYTES;
		}
	}

	if (cfg->auth_eng_id) {
		upd_info->flags |= SA_CMDL_UPD_AUTH;
		upd_info->auth_size.index = auth_offset >> 2;
		upd_info->auth_offset.index = upd_info->auth_size.index + 1;
		cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
		cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
			SA_CMDL_HEADER_SIZE_BYTES;
		total += SA_CMDL_HEADER_SIZE_BYTES;
	}

	total = roundup(total, 8);

	for (i = 0; i < total / 4; i++)
		word_ptr[i] = swab32(word_ptr[i]);

	return total;
}
/* Update Command label */
static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
				  struct sa_cmdl_upd_info *upd_info)
{
	int i = 0, j;

	if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
		cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
		cmdl[upd_info->enc_size.index] |= req->enc_size;
		cmdl[upd_info->enc_offset.index] &=
						~SA_CMDL_SOP_BYPASS_LEN_MASK;
		cmdl[upd_info->enc_offset.index] |=
			((u32)req->enc_offset <<
			 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));

		if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
			__be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
			u32 *enc_iv = (u32 *)req->enc_iv;

			for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
				data[j] = cpu_to_be32(*enc_iv);
				enc_iv++;
			}
		}
	}

	if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
		cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
		cmdl[upd_info->auth_size.index] |= req->auth_size;
		cmdl[upd_info->auth_offset.index] &=
			~SA_CMDL_SOP_BYPASS_LEN_MASK;
		cmdl[upd_info->auth_offset.index] |=
			((u32)req->auth_offset <<
			 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
		if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
			sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
				   req->auth_iv,
				   (upd_info->auth_iv.size > 8));
		}
		if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
			int offset = (req->auth_size & 0xF) ? 4 : 0;

			memcpy(&cmdl[upd_info->aux_key_info.index],
			       &upd_info->aux_key[offset], 16);
		}
	}
}

/* Format SWINFO words to be sent to SA */
static
void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
		   u8 cmdl_present, u8 cmdl_offset, u8 flags,
		   u8 hash_size, u32 *swinfo)
{
	swinfo[0] = sc_id;
	swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
	if (likely(cmdl_present))
		swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
						__ffs(SA_SW0_CMDL_INFO_MASK));
	swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));

	swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
	swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
	swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
	swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
}
/* Dump the security context */
static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
{
#ifdef DEBUG
	dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
		       16, 1, buf, SA_CTX_MAX_SZ, false);
#endif
}

static
int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
	       u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
	       struct algo_data *ad, u8 enc, u32 *swinfo)
{
	int enc_sc_offset = 0;
	int auth_sc_offset = 0;
	u8 *sc_buf = ctx->sc;
	u16 sc_id = ctx->sc_id;
	u8 first_engine = 0;

	memzero_explicit(sc_buf, SA_CTX_MAX_SZ);

	if (ad->auth_eng.eng_id) {
		if (enc)
			first_engine = ad->enc_eng.eng_id;
		else
			first_engine = ad->auth_eng.eng_id;

		enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
		auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
		sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
		if (!ad->hash_size)
			return -EINVAL;
		ad->hash_size = roundup(ad->hash_size, 8);
	} else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
		enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
		first_engine = ad->enc_eng.eng_id;
		sc_buf[1] = SA_SCCTL_FE_ENC;
		ad->hash_size = ad->iv_out_size;
	}

	/* SCCTL Owner info: 0=host, 1=CP_ACE */
	sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
	memcpy(&sc_buf[2], &sc_id, 2);
	sc_buf[4] = 0x0;
	sc_buf[5] = PRIV_ID;
	sc_buf[6] = PRIV;
	sc_buf[7] = 0x0;

	/* Prepare context for encryption engine */
	if (ad->enc_eng.sc_size) {
		if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
				  &sc_buf[enc_sc_offset]))
			return -EINVAL;
	}

	/* Prepare context for authentication engine */
	if (ad->auth_eng.sc_size)
		sa_set_sc_auth(ad, auth_key, auth_key_sz,
			       &sc_buf[auth_sc_offset]);

	/* Set the ownership of context to CP_ACE */
	sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;

	/* swizzle the security context */
	sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);

	sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
		      SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);

	sa_dump_sc(sc_buf, ctx->sc_phys);

	return 0;
}
/* Free the per direction context memory */
static void sa_free_ctx_info ( struct sa_ctx_info * ctx ,
struct sa_crypto_data * data )
{
unsigned long bn ;
bn = ctx - > sc_id - data - > sc_id_start ;
spin_lock ( & data - > scid_lock ) ;
__clear_bit ( bn , data - > ctx_bm ) ;
data - > sc_id - - ;
spin_unlock ( & data - > scid_lock ) ;
if ( ctx - > sc ) {
dma_pool_free ( data - > sc_pool , ctx - > sc , ctx - > sc_phys ) ;
ctx - > sc = NULL ;
}
}
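
/*
 * Allocate a free security context ID from the bitmap and carve the
 * context buffer for one direction out of the DMA pool.
 */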
static int sa_init_ctx_info ( struct sa_ctx_info * ctx ,
struct sa_crypto_data * data )
{
unsigned long bn ;
int err ;
spin_lock ( & data - > scid_lock ) ;
bn = find_first_zero_bit ( data - > ctx_bm , SA_MAX_NUM_CTX ) ;
__set_bit ( bn , data - > ctx_bm ) ;
data - > sc_id + + ;
spin_unlock ( & data - > scid_lock ) ;
ctx - > sc_id = ( u16 ) ( data - > sc_id_start + bn ) ;
ctx - > sc = dma_pool_alloc ( data - > sc_pool , GFP_KERNEL , & ctx - > sc_phys ) ;
if ( ! ctx - > sc ) {
dev_err ( & data - > pdev - > dev , " Failed to allocate SC memory \n " ) ;
err = - ENOMEM ;
goto scid_rollback ;
}
return 0 ;
scid_rollback :
spin_lock ( & data - > scid_lock ) ;
__clear_bit ( bn , data - > ctx_bm ) ;
data - > sc_id - - ;
spin_unlock ( & data - > scid_lock ) ;
return err ;
}
static void sa_cipher_cra_exit ( struct crypto_skcipher * tfm )
{
struct sa_tfm_ctx * ctx = crypto_skcipher_ctx ( tfm ) ;
struct sa_crypto_data * data = dev_get_drvdata ( sa_k3_dev ) ;
dev_dbg ( sa_k3_dev , " %s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad)) \n " ,
__func__ , tfm , ctx - > enc . sc_id , & ctx - > enc . sc_phys ,
ctx - > dec . sc_id , & ctx - > dec . sc_phys ) ;
sa_free_ctx_info ( & ctx - > enc , data ) ;
sa_free_ctx_info ( & ctx - > dec , data ) ;
crypto_free_sync_skcipher ( ctx - > fallback . skcipher ) ;
}
static int sa_cipher_cra_init ( struct crypto_skcipher * tfm )
{
struct sa_tfm_ctx * ctx = crypto_skcipher_ctx ( tfm ) ;
struct sa_crypto_data * data = dev_get_drvdata ( sa_k3_dev ) ;
const char * name = crypto_tfm_alg_name ( & tfm - > base ) ;
int ret ;
memzero_explicit ( ctx , sizeof ( * ctx ) ) ;
ctx - > dev_data = data ;
ret = sa_init_ctx_info ( & ctx - > enc , data ) ;
if ( ret )
return ret ;
ret = sa_init_ctx_info ( & ctx - > dec , data ) ;
if ( ret ) {
sa_free_ctx_info ( & ctx - > enc , data ) ;
return ret ;
}
ctx - > fallback . skcipher =
crypto_alloc_sync_skcipher ( name , 0 , CRYPTO_ALG_NEED_FALLBACK ) ;
if ( IS_ERR ( ctx - > fallback . skcipher ) ) {
dev_err ( sa_k3_dev , " Error allocating fallback algo %s \n " , name ) ;
return PTR_ERR ( ctx - > fallback . skcipher ) ;
}
dev_dbg ( sa_k3_dev , " %s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad)) \n " ,
__func__ , tfm , ctx - > enc . sc_id , & ctx - > enc . sc_phys ,
ctx - > dec . sc_id , & ctx - > dec . sc_phys ) ;
return 0 ;
}
static int sa_cipher_setkey ( struct crypto_skcipher * tfm , const u8 * key ,
unsigned int keylen , struct algo_data * ad )
{
struct sa_tfm_ctx * ctx = crypto_skcipher_ctx ( tfm ) ;
int cmdl_len ;
struct sa_cmdl_cfg cfg ;
int ret ;
if ( keylen ! = AES_KEYSIZE_128 & & keylen ! = AES_KEYSIZE_192 & &
keylen ! = AES_KEYSIZE_256 )
return - EINVAL ;
ad - > enc_eng . eng_id = SA_ENG_ID_EM1 ;
ad - > enc_eng . sc_size = SA_CTX_ENC_TYPE1_SZ ;
memzero_explicit ( & cfg , sizeof ( cfg ) ) ;
cfg . enc_eng_id = ad - > enc_eng . eng_id ;
cfg . iv_size = crypto_skcipher_ivsize ( tfm ) ;
crypto_sync_skcipher_clear_flags ( ctx - > fallback . skcipher ,
CRYPTO_TFM_REQ_MASK ) ;
crypto_sync_skcipher_set_flags ( ctx - > fallback . skcipher ,
tfm - > base . crt_flags &
CRYPTO_TFM_REQ_MASK ) ;
ret = crypto_sync_skcipher_setkey ( ctx - > fallback . skcipher , key , keylen ) ;
if ( ret )
return ret ;
/* Setup Encryption Security Context & Command label template */
if ( sa_init_sc ( & ctx - > enc , key , keylen , NULL , 0 , ad , 1 ,
& ctx - > enc . epib [ 1 ] ) )
goto badkey ;
cmdl_len = sa_format_cmdl_gen ( & cfg ,
( u8 * ) ctx - > enc . cmdl ,
& ctx - > enc . cmdl_upd_info ) ;
if ( cmdl_len < = 0 | | ( cmdl_len > SA_MAX_CMDL_WORDS * sizeof ( u32 ) ) )
goto badkey ;
ctx - > enc . cmdl_size = cmdl_len ;
/* Setup Decryption Security Context & Command label template */
if ( sa_init_sc ( & ctx - > dec , key , keylen , NULL , 0 , ad , 0 ,
& ctx - > dec . epib [ 1 ] ) )
goto badkey ;
cfg . enc_eng_id = ad - > enc_eng . eng_id ;
cmdl_len = sa_format_cmdl_gen ( & cfg , ( u8 * ) ctx - > dec . cmdl ,
& ctx - > dec . cmdl_upd_info ) ;
if ( cmdl_len < = 0 | | ( cmdl_len > SA_MAX_CMDL_WORDS * sizeof ( u32 ) ) )
goto badkey ;
ctx - > dec . cmdl_size = cmdl_len ;
ctx - > iv_idx = ad - > iv_idx ;
return 0 ;
badkey :
dev_err ( sa_k3_dev , " %s: badkey \n " , __func__ ) ;
return - EINVAL ;
}
static int sa_aes_cbc_setkey ( struct crypto_skcipher * tfm , const u8 * key ,
unsigned int keylen )
{
struct algo_data ad = { 0 } ;
/* Convert the key size (16/24/32) to the key size index (0/1/2) */
int key_idx = ( keylen > > 3 ) - 2 ;
if ( key_idx > = 3 )
return - EINVAL ;
ad . mci_enc = mci_cbc_enc_array [ key_idx ] ;
ad . mci_dec = mci_cbc_dec_array [ key_idx ] ;
ad . inv_key = true ;
ad . ealg_id = SA_EALG_ID_AES_CBC ;
ad . iv_idx = 4 ;
ad . iv_out_size = 16 ;
return sa_cipher_setkey ( tfm , key , keylen , & ad ) ;
}
static int sa_aes_ecb_setkey ( struct crypto_skcipher * tfm , const u8 * key ,
unsigned int keylen )
{
struct algo_data ad = { 0 } ;
/* Convert the key size (16/24/32) to the key size index (0/1/2) */
int key_idx = ( keylen > > 3 ) - 2 ;
if ( key_idx > = 3 )
return - EINVAL ;
ad . mci_enc = mci_ecb_enc_array [ key_idx ] ;
ad . mci_dec = mci_ecb_dec_array [ key_idx ] ;
ad . inv_key = true ;
ad . ealg_id = SA_EALG_ID_AES_ECB ;
return sa_cipher_setkey ( tfm , key , keylen , & ad ) ;
}
static int sa_3des_cbc_setkey ( struct crypto_skcipher * tfm , const u8 * key ,
unsigned int keylen )
{
struct algo_data ad = { 0 } ;
ad . mci_enc = mci_cbc_3des_enc_array ;
ad . mci_dec = mci_cbc_3des_dec_array ;
ad . ealg_id = SA_EALG_ID_3DES_CBC ;
ad . iv_idx = 6 ;
ad . iv_out_size = 8 ;
return sa_cipher_setkey ( tfm , key , keylen , & ad ) ;
}
static int sa_3des_ecb_setkey ( struct crypto_skcipher * tfm , const u8 * key ,
unsigned int keylen )
{
struct algo_data ad = { 0 } ;
ad . mci_enc = mci_ecb_3des_enc_array ;
ad . mci_dec = mci_ecb_3des_dec_array ;
return sa_cipher_setkey ( tfm , key , keylen , & ad ) ;
}
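
/*
 * RX DMA completion callback for skcipher requests: copy the output IV
 * back from the descriptor metadata, unmap the scatterlists and
 * complete the request.
 */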
static void sa_aes_dma_in_callback ( void * data )
{
struct sa_rx_data * rxd = ( struct sa_rx_data * ) data ;
struct skcipher_request * req ;
int sglen ;
u32 * result ;
__be32 * mdptr ;
size_t ml , pl ;
int i ;
enum dma_data_direction dir_src ;
bool diff_dst ;
req = container_of ( rxd - > req , struct skcipher_request , base ) ;
sglen = sg_nents_for_len ( req - > src , req - > cryptlen ) ;
diff_dst = ( req - > src ! = req - > dst ) ? true : false ;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL ;
if ( req - > iv ) {
mdptr = ( __be32 * ) dmaengine_desc_get_metadata_ptr ( rxd - > tx_in , & pl ,
& ml ) ;
result = ( u32 * ) req - > iv ;
for ( i = 0 ; i < ( rxd - > enc_iv_size / 4 ) ; i + + )
result [ i ] = be32_to_cpu ( mdptr [ i + rxd - > iv_idx ] ) ;
}
dma_unmap_sg ( rxd - > ddev , req - > src , sglen , dir_src ) ;
kfree ( rxd - > split_src_sg ) ;
if ( diff_dst ) {
sglen = sg_nents_for_len ( req - > dst , req - > cryptlen ) ;
dma_unmap_sg ( rxd - > ddev , req - > dst , sglen ,
DMA_FROM_DEVICE ) ;
kfree ( rxd - > split_dst_sg ) ;
}
kfree ( rxd ) ;
skcipher_request_complete ( req , 0 ) ;
}
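
/*
 * Populate the TX descriptor metadata area: the EPIB words first,
 * followed by the PS data (command label) words.
 */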
static void
sa_prepare_tx_desc ( u32 * mdptr , u32 pslen , u32 * psdata , u32 epiblen , u32 * epib )
{
u32 * out , * in ;
int i ;
for ( out = mdptr , in = epib , i = 0 ; i < epiblen / sizeof ( u32 ) ; i + + )
* out + + = * in + + ;
mdptr [ 4 ] = ( 0xFFFF < < 16 ) ;
for ( out = & mdptr [ 5 ] , in = psdata , i = 0 ;
i < pslen / sizeof ( u32 ) ; i + + )
* out + + = * in + + ;
}
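
/*
 * Common DMA submission path for skcipher, ahash and aead requests: map
 * the scatterlists, prepare the RX and TX descriptors with the command
 * label metadata and kick both channels. Returns -EINPROGRESS on success.
 */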
static int sa_run ( struct sa_req * req )
{
struct sa_rx_data * rxd ;
gfp_t gfp_flags ;
u32 cmdl [ SA_MAX_CMDL_WORDS ] ;
struct sa_crypto_data * pdata = dev_get_drvdata ( sa_k3_dev ) ;
struct device * ddev ;
struct dma_chan * dma_rx ;
int sg_nents , src_nents , dst_nents ;
int mapped_src_nents , mapped_dst_nents ;
struct scatterlist * src , * dst ;
size_t pl , ml , split_size ;
struct sa_ctx_info * sa_ctx = req - > enc ? & req - > ctx - > enc : & req - > ctx - > dec ;
int ret ;
struct dma_async_tx_descriptor * tx_out ;
u32 * mdptr ;
bool diff_dst ;
enum dma_data_direction dir_src ;
gfp_flags = req - > base - > flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC ;
rxd = kzalloc ( sizeof ( * rxd ) , gfp_flags ) ;
if ( ! rxd )
return - ENOMEM ;
if ( req - > src ! = req - > dst ) {
diff_dst = true ;
dir_src = DMA_TO_DEVICE ;
} else {
diff_dst = false ;
dir_src = DMA_BIDIRECTIONAL ;
}
/*
* SA2UL has an interesting feature where the receive DMA channel
* is selected based on the data passed to the engine . Within the
* transition range , there is also a space where it is impossible
* to determine where the data will end up , and this should be
* avoided . This will be handled by the SW fallback mechanism by
* the individual algorithm implementations .
*/
if ( req - > size > = 256 )
dma_rx = pdata - > dma_rx2 ;
else
dma_rx = pdata - > dma_rx1 ;
ddev = dma_rx - > device - > dev ;
memcpy ( cmdl , sa_ctx - > cmdl , sa_ctx - > cmdl_size ) ;
sa_update_cmdl ( req , cmdl , & sa_ctx - > cmdl_upd_info ) ;
if ( req - > type ! = CRYPTO_ALG_TYPE_AHASH ) {
if ( req - > enc )
req - > type | =
( SA_REQ_SUBTYPE_ENC < < SA_REQ_SUBTYPE_SHIFT ) ;
else
req - > type | =
( SA_REQ_SUBTYPE_DEC < < SA_REQ_SUBTYPE_SHIFT ) ;
}
cmdl [ sa_ctx - > cmdl_size / sizeof ( u32 ) ] = req - > type ;
/*
* Map the packets , first we check if the data fits into a single
* sg entry and use that if possible . If it does not fit , we check
* if we need to do sg_split to align the scatterlist data on the
* actual data size being processed by the crypto engine .
*/
src = req - > src ;
sg_nents = sg_nents_for_len ( src , req - > size ) ;
split_size = req - > size ;
if ( sg_nents = = 1 & & split_size < = req - > src - > length ) {
src = & rxd - > rx_sg ;
sg_init_table ( src , 1 ) ;
sg_set_page ( src , sg_page ( req - > src ) , split_size ,
req - > src - > offset ) ;
src_nents = 1 ;
dma_map_sg ( ddev , src , sg_nents , dir_src ) ;
} else {
mapped_src_nents = dma_map_sg ( ddev , req - > src , sg_nents ,
dir_src ) ;
ret = sg_split ( req - > src , mapped_src_nents , 0 , 1 , & split_size ,
& src , & src_nents , gfp_flags ) ;
if ( ret ) {
src_nents = sg_nents ;
src = req - > src ;
} else {
rxd - > split_src_sg = src ;
}
}
if ( ! diff_dst ) {
dst_nents = src_nents ;
dst = src ;
} else {
dst_nents = sg_nents_for_len ( req - > dst , req - > size ) ;
if ( dst_nents = = 1 & & split_size < = req - > dst - > length ) {
dst = & rxd - > tx_sg ;
sg_init_table ( dst , 1 ) ;
sg_set_page ( dst , sg_page ( req - > dst ) , split_size ,
req - > dst - > offset ) ;
dst_nents = 1 ;
dma_map_sg ( ddev , dst , dst_nents , DMA_FROM_DEVICE ) ;
} else {
mapped_dst_nents = dma_map_sg ( ddev , req - > dst , dst_nents ,
DMA_FROM_DEVICE ) ;
ret = sg_split ( req - > dst , mapped_dst_nents , 0 , 1 ,
& split_size , & dst , & dst_nents ,
gfp_flags ) ;
if ( ret ) {
dst_nents = dst_nents ;
dst = req - > dst ;
} else {
rxd - > split_dst_sg = dst ;
}
}
}
if ( unlikely ( src_nents ! = sg_nents ) ) {
dev_warn_ratelimited ( sa_k3_dev , " failed to map tx pkt \n " ) ;
ret = - EIO ;
goto err_cleanup ;
}
rxd - > tx_in = dmaengine_prep_slave_sg ( dma_rx , dst , dst_nents ,
DMA_DEV_TO_MEM ,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK ) ;
if ( ! rxd - > tx_in ) {
dev_err ( pdata - > dev , " IN prep_slave_sg() failed \n " ) ;
ret = - EINVAL ;
goto err_cleanup ;
}
rxd - > req = ( void * ) req - > base ;
rxd - > enc = req - > enc ;
rxd - > ddev = ddev ;
rxd - > src = src ;
rxd - > dst = dst ;
rxd - > iv_idx = req - > ctx - > iv_idx ;
rxd - > enc_iv_size = sa_ctx - > cmdl_upd_info . enc_iv . size ;
rxd - > tx_in - > callback = req - > callback ;
rxd - > tx_in - > callback_param = rxd ;
tx_out = dmaengine_prep_slave_sg ( pdata - > dma_tx , src ,
src_nents , DMA_MEM_TO_DEV ,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK ) ;
if ( ! tx_out ) {
dev_err ( pdata - > dev , " OUT prep_slave_sg() failed \n " ) ;
ret = - EINVAL ;
goto err_cleanup ;
}
/*
* Prepare metadata for DMA engine . This essentially describes the
* crypto algorithm to be used , data sizes , different keys etc .
*/
mdptr = ( u32 * ) dmaengine_desc_get_metadata_ptr ( tx_out , & pl , & ml ) ;
sa_prepare_tx_desc ( mdptr , ( sa_ctx - > cmdl_size + ( SA_PSDATA_CTX_WORDS *
sizeof ( u32 ) ) ) , cmdl , sizeof ( sa_ctx - > epib ) ,
sa_ctx - > epib ) ;
ml = sa_ctx - > cmdl_size + ( SA_PSDATA_CTX_WORDS * sizeof ( u32 ) ) ;
dmaengine_desc_set_metadata_len ( tx_out , req - > mdata_size ) ;
dmaengine_submit ( tx_out ) ;
dmaengine_submit ( rxd - > tx_in ) ;
dma_async_issue_pending ( dma_rx ) ;
dma_async_issue_pending ( pdata - > dma_tx ) ;
return - EINPROGRESS ;
err_cleanup :
dma_unmap_sg ( ddev , req - > src , sg_nents , DMA_TO_DEVICE ) ;
kfree ( rxd - > split_src_sg ) ;
if ( req - > src ! = req - > dst ) {
dst_nents = sg_nents_for_len ( req - > dst , req - > size ) ;
dma_unmap_sg ( ddev , req - > dst , dst_nents , DMA_FROM_DEVICE ) ;
kfree ( rxd - > split_dst_sg ) ;
}
kfree ( rxd ) ;
return ret ;
}
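
/*
 * Encrypt or decrypt a block cipher request, using the synchronous
 * software fallback for lengths the engine does not handle.
 */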
static int sa_cipher_run ( struct skcipher_request * req , u8 * iv , int enc )
{
struct sa_tfm_ctx * ctx =
crypto_skcipher_ctx ( crypto_skcipher_reqtfm ( req ) ) ;
struct crypto_alg * alg = req - > base . tfm - > __crt_alg ;
struct sa_req sa_req = { 0 } ;
int ret ;
if ( ! req - > cryptlen )
return 0 ;
if ( req - > cryptlen % alg - > cra_blocksize )
return - EINVAL ;
/* Use SW fallback if the data size is not supported */
if ( req - > cryptlen > SA_MAX_DATA_SZ | |
( req - > cryptlen > = SA_UNSAFE_DATA_SZ_MIN & &
req - > cryptlen < = SA_UNSAFE_DATA_SZ_MAX ) ) {
SYNC_SKCIPHER_REQUEST_ON_STACK ( subreq , ctx - > fallback . skcipher ) ;
skcipher_request_set_sync_tfm ( subreq , ctx - > fallback . skcipher ) ;
skcipher_request_set_callback ( subreq , req - > base . flags ,
NULL , NULL ) ;
skcipher_request_set_crypt ( subreq , req - > src , req - > dst ,
req - > cryptlen , req - > iv ) ;
if ( enc )
ret = crypto_skcipher_encrypt ( subreq ) ;
else
ret = crypto_skcipher_decrypt ( subreq ) ;
skcipher_request_zero ( subreq ) ;
return ret ;
}
sa_req . size = req - > cryptlen ;
sa_req . enc_size = req - > cryptlen ;
sa_req . src = req - > src ;
sa_req . dst = req - > dst ;
sa_req . enc_iv = iv ;
sa_req . type = CRYPTO_ALG_TYPE_SKCIPHER ;
sa_req . enc = enc ;
sa_req . callback = sa_aes_dma_in_callback ;
sa_req . mdata_size = 44 ;
sa_req . base = & req - > base ;
sa_req . ctx = ctx ;
return sa_run ( & sa_req ) ;
}
static int sa_encrypt ( struct skcipher_request * req )
{
return sa_cipher_run ( req , req - > iv , 1 ) ;
}
static int sa_decrypt ( struct skcipher_request * req )
{
return sa_cipher_run ( req , req - > iv , 0 ) ;
}
static void sa_sha_dma_in_callback ( void * data )
{
struct sa_rx_data * rxd = ( struct sa_rx_data * ) data ;
struct ahash_request * req ;
struct crypto_ahash * tfm ;
unsigned int authsize ;
int i , sg_nents ;
size_t ml , pl ;
u32 * result ;
__be32 * mdptr ;
req = container_of ( rxd - > req , struct ahash_request , base ) ;
tfm = crypto_ahash_reqtfm ( req ) ;
authsize = crypto_ahash_digestsize ( tfm ) ;
mdptr = ( __be32 * ) dmaengine_desc_get_metadata_ptr ( rxd - > tx_in , & pl , & ml ) ;
result = ( u32 * ) req - > result ;
for ( i = 0 ; i < ( authsize / 4 ) ; i + + )
result [ i ] = be32_to_cpu ( mdptr [ i + 4 ] ) ;
sg_nents = sg_nents_for_len ( req - > src , req - > nbytes ) ;
dma_unmap_sg ( rxd - > ddev , req - > src , sg_nents , DMA_FROM_DEVICE ) ;
kfree ( rxd - > split_src_sg ) ;
kfree ( rxd ) ;
ahash_request_complete ( req , 0 ) ;
}
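
/*
 * Zero-length input is not sent to the engine; return the well-known
 * digest of the empty message instead.
 */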
static int zero_message_process ( struct ahash_request * req )
{
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
int sa_digest_size = crypto_ahash_digestsize ( tfm ) ;
switch ( sa_digest_size ) {
case SHA1_DIGEST_SIZE :
memcpy ( req - > result , sha1_zero_message_hash , sa_digest_size ) ;
break ;
case SHA256_DIGEST_SIZE :
memcpy ( req - > result , sha256_zero_message_hash , sa_digest_size ) ;
break ;
case SHA512_DIGEST_SIZE :
memcpy ( req - > result , sha512_zero_message_hash , sa_digest_size ) ;
break ;
default :
return - EINVAL ;
}
return 0 ;
}
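
/*
 * Hash a request on the engine, falling back to the software ahash for
 * sizes outside the supported range.
 */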
static int sa_sha_run ( struct ahash_request * req )
{
struct sa_tfm_ctx * ctx = crypto_ahash_ctx ( crypto_ahash_reqtfm ( req ) ) ;
struct sa_sha_req_ctx * rctx = ahash_request_ctx ( req ) ;
struct sa_req sa_req = { 0 } ;
size_t auth_len ;
auth_len = req - > nbytes ;
if ( ! auth_len )
return zero_message_process ( req ) ;
if ( auth_len > SA_MAX_DATA_SZ | |
( auth_len > = SA_UNSAFE_DATA_SZ_MIN & &
auth_len < = SA_UNSAFE_DATA_SZ_MAX ) ) {
struct ahash_request * subreq = & rctx - > fallback_req ;
int ret = 0 ;
ahash_request_set_tfm ( subreq , ctx - > fallback . ahash ) ;
subreq - > base . flags = req - > base . flags & CRYPTO_TFM_REQ_MAY_SLEEP ;
crypto_ahash_init ( subreq ) ;
subreq - > nbytes = auth_len ;
subreq - > src = req - > src ;
subreq - > result = req - > result ;
ret | = crypto_ahash_update ( subreq ) ;
subreq - > nbytes = 0 ;
ret | = crypto_ahash_final ( subreq ) ;
return ret ;
}
sa_req . size = auth_len ;
sa_req . auth_size = auth_len ;
sa_req . src = req - > src ;
sa_req . dst = req - > src ;
sa_req . enc = true ;
sa_req . type = CRYPTO_ALG_TYPE_AHASH ;
sa_req . callback = sa_sha_dma_in_callback ;
sa_req . mdata_size = 28 ;
sa_req . ctx = ctx ;
sa_req . base = & req - > base ;
return sa_run ( & sa_req ) ;
}
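
/*
 * Build the authentication security context and command label template
 * for a hash algorithm.
 */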
static int sa_sha_setup ( struct sa_tfm_ctx * ctx , struct algo_data * ad )
{
int bs = crypto_shash_blocksize ( ctx - > shash ) ;
int cmdl_len ;
struct sa_cmdl_cfg cfg ;
ad - > enc_eng . sc_size = SA_CTX_ENC_TYPE1_SZ ;
ad - > auth_eng . eng_id = SA_ENG_ID_AM1 ;
ad - > auth_eng . sc_size = SA_CTX_AUTH_TYPE2_SZ ;
memset ( ctx - > authkey , 0 , bs ) ;
memset ( & cfg , 0 , sizeof ( cfg ) ) ;
cfg . aalg = ad - > aalg_id ;
cfg . enc_eng_id = ad - > enc_eng . eng_id ;
cfg . auth_eng_id = ad - > auth_eng . eng_id ;
cfg . iv_size = 0 ;
cfg . akey = NULL ;
cfg . akey_len = 0 ;
/* Setup Encryption Security Context & Command label template */
if ( sa_init_sc ( & ctx - > enc , NULL , 0 , NULL , 0 , ad , 0 ,
& ctx - > enc . epib [ 1 ] ) )
goto badkey ;
cmdl_len = sa_format_cmdl_gen ( & cfg ,
( u8 * ) ctx - > enc . cmdl ,
& ctx - > enc . cmdl_upd_info ) ;
if ( cmdl_len < = 0 | | ( cmdl_len > SA_MAX_CMDL_WORDS * sizeof ( u32 ) ) )
goto badkey ;
ctx - > enc . cmdl_size = cmdl_len ;
return 0 ;
badkey :
dev_err ( sa_k3_dev , " %s: badkey \n " , __func__ ) ;
return - EINVAL ;
}
static int sa_sha_cra_init_alg ( struct crypto_tfm * tfm , const char * alg_base )
{
struct sa_tfm_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
struct sa_crypto_data * data = dev_get_drvdata ( sa_k3_dev ) ;
int ret ;
memset ( ctx , 0 , sizeof ( * ctx ) ) ;
ctx - > dev_data = data ;
ret = sa_init_ctx_info ( & ctx - > enc , data ) ;
if ( ret )
return ret ;
if ( alg_base ) {
ctx - > shash = crypto_alloc_shash ( alg_base , 0 ,
CRYPTO_ALG_NEED_FALLBACK ) ;
if ( IS_ERR ( ctx - > shash ) ) {
dev_err ( sa_k3_dev , " base driver %s couldn't be loaded \n " ,
alg_base ) ;
return PTR_ERR ( ctx - > shash ) ;
}
/* for fallback */
ctx - > fallback . ahash =
crypto_alloc_ahash ( alg_base , 0 ,
CRYPTO_ALG_NEED_FALLBACK ) ;
if ( IS_ERR ( ctx - > fallback . ahash ) ) {
dev_err ( ctx - > dev_data - > dev ,
" Could not load fallback driver \n " ) ;
return PTR_ERR ( ctx - > fallback . ahash ) ;
}
}
dev_dbg ( sa_k3_dev , " %s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad)) \n " ,
__func__ , tfm , ctx - > enc . sc_id , & ctx - > enc . sc_phys ,
ctx - > dec . sc_id , & ctx - > dec . sc_phys ) ;
crypto_ahash_set_reqsize ( __crypto_ahash_cast ( tfm ) ,
sizeof ( struct sa_sha_req_ctx ) +
crypto_ahash_reqsize ( ctx - > fallback . ahash ) ) ;
return 0 ;
}
static int sa_sha_digest ( struct ahash_request * req )
{
return sa_sha_run ( req ) ;
}
static int sa_sha_init ( struct ahash_request * req )
{
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
struct sa_sha_req_ctx * rctx = ahash_request_ctx ( req ) ;
struct sa_tfm_ctx * ctx = crypto_ahash_ctx ( tfm ) ;
dev_dbg ( sa_k3_dev , " init: digest size: %u, rctx=%p \n " ,
crypto_ahash_digestsize ( tfm ) , rctx ) ;
ahash_request_set_tfm ( & rctx - > fallback_req , ctx - > fallback . ahash ) ;
rctx - > fallback_req . base . flags =
req - > base . flags & CRYPTO_TFM_REQ_MAY_SLEEP ;
return crypto_ahash_init ( & rctx - > fallback_req ) ;
}
static int sa_sha_update ( struct ahash_request * req )
{
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
struct sa_sha_req_ctx * rctx = ahash_request_ctx ( req ) ;
struct sa_tfm_ctx * ctx = crypto_ahash_ctx ( tfm ) ;
ahash_request_set_tfm ( & rctx - > fallback_req , ctx - > fallback . ahash ) ;
rctx - > fallback_req . base . flags =
req - > base . flags & CRYPTO_TFM_REQ_MAY_SLEEP ;
rctx - > fallback_req . nbytes = req - > nbytes ;
rctx - > fallback_req . src = req - > src ;
return crypto_ahash_update ( & rctx - > fallback_req ) ;
}
static int sa_sha_final ( struct ahash_request * req )
{
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
struct sa_sha_req_ctx * rctx = ahash_request_ctx ( req ) ;
struct sa_tfm_ctx * ctx = crypto_ahash_ctx ( tfm ) ;
ahash_request_set_tfm ( & rctx - > fallback_req , ctx - > fallback . ahash ) ;
rctx - > fallback_req . base . flags =
req - > base . flags & CRYPTO_TFM_REQ_MAY_SLEEP ;
rctx - > fallback_req . result = req - > result ;
return crypto_ahash_final ( & rctx - > fallback_req ) ;
}
static int sa_sha_finup ( struct ahash_request * req )
{
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
struct sa_sha_req_ctx * rctx = ahash_request_ctx ( req ) ;
struct sa_tfm_ctx * ctx = crypto_ahash_ctx ( tfm ) ;
ahash_request_set_tfm ( & rctx - > fallback_req , ctx - > fallback . ahash ) ;
rctx - > fallback_req . base . flags =
req - > base . flags & CRYPTO_TFM_REQ_MAY_SLEEP ;
rctx - > fallback_req . nbytes = req - > nbytes ;
rctx - > fallback_req . src = req - > src ;
rctx - > fallback_req . result = req - > result ;
return crypto_ahash_finup ( & rctx - > fallback_req ) ;
}
static int sa_sha_import ( struct ahash_request * req , const void * in )
{
struct sa_sha_req_ctx * rctx = ahash_request_ctx ( req ) ;
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
struct sa_tfm_ctx * ctx = crypto_ahash_ctx ( tfm ) ;
ahash_request_set_tfm ( & rctx - > fallback_req , ctx - > fallback . ahash ) ;
rctx - > fallback_req . base . flags = req - > base . flags &
CRYPTO_TFM_REQ_MAY_SLEEP ;
return crypto_ahash_import ( & rctx - > fallback_req , in ) ;
}
static int sa_sha_export ( struct ahash_request * req , void * out )
{
struct sa_sha_req_ctx * rctx = ahash_request_ctx ( req ) ;
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
struct sa_tfm_ctx * ctx = crypto_ahash_ctx ( tfm ) ;
struct ahash_request * subreq = & rctx - > fallback_req ;
ahash_request_set_tfm ( subreq , ctx - > fallback . ahash ) ;
subreq - > base . flags = req - > base . flags & CRYPTO_TFM_REQ_MAY_SLEEP ;
return crypto_ahash_export ( subreq , out ) ;
}
static int sa_sha1_cra_init ( struct crypto_tfm * tfm )
{
struct algo_data ad = { 0 } ;
struct sa_tfm_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
sa_sha_cra_init_alg ( tfm , " sha1 " ) ;
ad . aalg_id = SA_AALG_ID_SHA1 ;
ad . hash_size = SHA1_DIGEST_SIZE ;
ad . auth_ctrl = SA_AUTH_SW_CTRL_SHA1 ;
sa_sha_setup ( ctx , & ad ) ;
return 0 ;
}
static int sa_sha256_cra_init ( struct crypto_tfm * tfm )
{
struct algo_data ad = { 0 } ;
struct sa_tfm_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
sa_sha_cra_init_alg ( tfm , " sha256 " ) ;
ad . aalg_id = SA_AALG_ID_SHA2_256 ;
ad . hash_size = SHA256_DIGEST_SIZE ;
ad . auth_ctrl = SA_AUTH_SW_CTRL_SHA256 ;
sa_sha_setup ( ctx , & ad ) ;
return 0 ;
}
static int sa_sha512_cra_init ( struct crypto_tfm * tfm )
{
struct algo_data ad = { 0 } ;
struct sa_tfm_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
sa_sha_cra_init_alg ( tfm , " sha512 " ) ;
ad . aalg_id = SA_AALG_ID_SHA2_512 ;
ad . hash_size = SHA512_DIGEST_SIZE ;
ad . auth_ctrl = SA_AUTH_SW_CTRL_SHA512 ;
sa_sha_setup ( ctx , & ad ) ;
return 0 ;
}
static void sa_sha_cra_exit ( struct crypto_tfm * tfm )
{
struct sa_tfm_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
struct sa_crypto_data * data = dev_get_drvdata ( sa_k3_dev ) ;
dev_dbg ( sa_k3_dev , " %s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad)) \n " ,
__func__ , tfm , ctx - > enc . sc_id , & ctx - > enc . sc_phys ,
ctx - > dec . sc_id , & ctx - > dec . sc_phys ) ;
if ( crypto_tfm_alg_type ( tfm ) = = CRYPTO_ALG_TYPE_AHASH )
sa_free_ctx_info ( & ctx - > enc , data ) ;
crypto_free_shash ( ctx - > shash ) ;
crypto_free_ahash ( ctx - > fallback . ahash ) ;
}
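
/*
 * RX DMA completion callback for AEAD requests: on encryption append the
 * computed tag to the destination, on decryption compare it against the
 * tag in the source and return -EBADMSG on mismatch.
 */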
static void sa_aead_dma_in_callback ( void * data )
{
struct sa_rx_data * rxd = ( struct sa_rx_data * ) data ;
struct aead_request * req ;
struct crypto_aead * tfm ;
unsigned int start ;
unsigned int authsize ;
u8 auth_tag [ SA_MAX_AUTH_TAG_SZ ] ;
size_t pl , ml ;
int i , sglen ;
int err = 0 ;
u16 auth_len ;
u32 * mdptr ;
bool diff_dst ;
enum dma_data_direction dir_src ;
req = container_of ( rxd - > req , struct aead_request , base ) ;
tfm = crypto_aead_reqtfm ( req ) ;
start = req - > assoclen + req - > cryptlen ;
authsize = crypto_aead_authsize ( tfm ) ;
diff_dst = ( req - > src ! = req - > dst ) ? true : false ;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL ;
mdptr = ( u32 * ) dmaengine_desc_get_metadata_ptr ( rxd - > tx_in , & pl , & ml ) ;
for ( i = 0 ; i < ( authsize / 4 ) ; i + + )
mdptr [ i + 4 ] = swab32 ( mdptr [ i + 4 ] ) ;
auth_len = req - > assoclen + req - > cryptlen ;
if ( ! rxd - > enc )
auth_len - = authsize ;
sglen = sg_nents_for_len ( rxd - > src , auth_len ) ;
dma_unmap_sg ( rxd - > ddev , rxd - > src , sglen , dir_src ) ;
kfree ( rxd - > split_src_sg ) ;
if ( diff_dst ) {
sglen = sg_nents_for_len ( rxd - > dst , auth_len ) ;
dma_unmap_sg ( rxd - > ddev , rxd - > dst , sglen , DMA_FROM_DEVICE ) ;
kfree ( rxd - > split_dst_sg ) ;
}
if ( rxd - > enc ) {
scatterwalk_map_and_copy ( & mdptr [ 4 ] , req - > dst , start , authsize ,
1 ) ;
} else {
start - = authsize ;
scatterwalk_map_and_copy ( auth_tag , req - > src , start , authsize ,
0 ) ;
err = memcmp ( & mdptr [ 4 ] , auth_tag , authsize ) ? - EBADMSG : 0 ;
}
kfree ( rxd ) ;
aead_request_complete ( req , err ) ;
}
static int sa_cra_init_aead ( struct crypto_aead * tfm , const char * hash ,
const char * fallback )
{
struct sa_tfm_ctx * ctx = crypto_aead_ctx ( tfm ) ;
struct sa_crypto_data * data = dev_get_drvdata ( sa_k3_dev ) ;
int ret ;
memzero_explicit ( ctx , sizeof ( * ctx ) ) ;
ctx - > shash = crypto_alloc_shash ( hash , 0 , CRYPTO_ALG_NEED_FALLBACK ) ;
if ( IS_ERR ( ctx - > shash ) ) {
dev_err ( sa_k3_dev , " base driver %s couldn't be loaded \n " , hash ) ;
return PTR_ERR ( ctx - > shash ) ;
}
ctx - > fallback . aead = crypto_alloc_aead ( fallback , 0 ,
CRYPTO_ALG_NEED_FALLBACK ) ;
if ( IS_ERR ( ctx - > fallback . aead ) ) {
dev_err ( sa_k3_dev , " fallback driver %s couldn't be loaded \n " ,
fallback ) ;
return PTR_ERR ( ctx - > fallback . aead ) ;
}
crypto_aead_set_reqsize ( tfm , sizeof ( struct aead_request ) +
crypto_aead_reqsize ( ctx - > fallback . aead ) ) ;
ret = sa_init_ctx_info ( & ctx - > enc , data ) ;
if ( ret )
return ret ;
ret = sa_init_ctx_info ( & ctx - > dec , data ) ;
if ( ret ) {
sa_free_ctx_info ( & ctx - > enc , data ) ;
return ret ;
}
dev_dbg ( sa_k3_dev , " %s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad)) \n " ,
__func__ , tfm , ctx - > enc . sc_id , & ctx - > enc . sc_phys ,
ctx - > dec . sc_id , & ctx - > dec . sc_phys ) ;
return ret ;
}
static int sa_cra_init_aead_sha1 ( struct crypto_aead * tfm )
{
return sa_cra_init_aead ( tfm , " sha1 " ,
" authenc(hmac(sha1-ce),cbc(aes-ce)) " ) ;
}
static int sa_cra_init_aead_sha256 ( struct crypto_aead * tfm )
{
return sa_cra_init_aead ( tfm , " sha256 " ,
" authenc(hmac(sha256-ce),cbc(aes-ce)) " ) ;
}
static void sa_exit_tfm_aead ( struct crypto_aead * tfm )
{
struct sa_tfm_ctx * ctx = crypto_aead_ctx ( tfm ) ;
struct sa_crypto_data * data = dev_get_drvdata ( sa_k3_dev ) ;
crypto_free_shash ( ctx - > shash ) ;
crypto_free_aead ( ctx - > fallback . aead ) ;
sa_free_ctx_info ( & ctx - > enc , data ) ;
sa_free_ctx_info ( & ctx - > dec , data ) ;
}
/* AEAD algorithm configuration interface function */
static int sa_aead_setkey ( struct crypto_aead * authenc ,
const u8 * key , unsigned int keylen ,
struct algo_data * ad )
{
struct sa_tfm_ctx * ctx = crypto_aead_ctx ( authenc ) ;
struct crypto_authenc_keys keys ;
int cmdl_len ;
struct sa_cmdl_cfg cfg ;
int key_idx ;
if ( crypto_authenc_extractkeys ( & keys , key , keylen ) ! = 0 )
return - EINVAL ;
/* Convert the key size (16/24/32) to the key size index (0/1/2) */
key_idx = ( keys . enckeylen > > 3 ) - 2 ;
if ( key_idx > = 3 )
return - EINVAL ;
ad - > ctx = ctx ;
ad - > enc_eng . eng_id = SA_ENG_ID_EM1 ;
ad - > enc_eng . sc_size = SA_CTX_ENC_TYPE1_SZ ;
ad - > auth_eng . eng_id = SA_ENG_ID_AM1 ;
ad - > auth_eng . sc_size = SA_CTX_AUTH_TYPE2_SZ ;
ad - > mci_enc = mci_cbc_enc_no_iv_array [ key_idx ] ;
ad - > mci_dec = mci_cbc_dec_no_iv_array [ key_idx ] ;
ad - > inv_key = true ;
ad - > keyed_mac = true ;
ad - > ealg_id = SA_EALG_ID_AES_CBC ;
ad - > prep_iopad = sa_prepare_iopads ;
memset ( & cfg , 0 , sizeof ( cfg ) ) ;
cfg . enc = true ;
cfg . aalg = ad - > aalg_id ;
cfg . enc_eng_id = ad - > enc_eng . eng_id ;
cfg . auth_eng_id = ad - > auth_eng . eng_id ;
cfg . iv_size = crypto_aead_ivsize ( authenc ) ;
cfg . akey = keys . authkey ;
cfg . akey_len = keys . authkeylen ;
/* Setup Encryption Security Context & Command label template */
if ( sa_init_sc ( & ctx - > enc , keys . enckey , keys . enckeylen ,
keys . authkey , keys . authkeylen ,
ad , 1 , & ctx - > enc . epib [ 1 ] ) )
return - EINVAL ;
cmdl_len = sa_format_cmdl_gen ( & cfg ,
( u8 * ) ctx - > enc . cmdl ,
& ctx - > enc . cmdl_upd_info ) ;
if ( cmdl_len < = 0 | | ( cmdl_len > SA_MAX_CMDL_WORDS * sizeof ( u32 ) ) )
return - EINVAL ;
ctx - > enc . cmdl_size = cmdl_len ;
/* Setup Decryption Security Context & Command label template */
if ( sa_init_sc ( & ctx - > dec , keys . enckey , keys . enckeylen ,
keys . authkey , keys . authkeylen ,
ad , 0 , & ctx - > dec . epib [ 1 ] ) )
return - EINVAL ;
cfg . enc = false ;
cmdl_len = sa_format_cmdl_gen ( & cfg , ( u8 * ) ctx - > dec . cmdl ,
& ctx - > dec . cmdl_upd_info ) ;
if ( cmdl_len < = 0 | | ( cmdl_len > SA_MAX_CMDL_WORDS * sizeof ( u32 ) ) )
return - EINVAL ;
ctx - > dec . cmdl_size = cmdl_len ;
crypto_aead_clear_flags ( ctx - > fallback . aead , CRYPTO_TFM_REQ_MASK ) ;
crypto_aead_set_flags ( ctx - > fallback . aead ,
crypto_aead_get_flags ( authenc ) &
CRYPTO_TFM_REQ_MASK ) ;
crypto_aead_setkey ( ctx - > fallback . aead , key , keylen ) ;
return 0 ;
}
static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

	return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
}
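
/*
 * Per-algorithm setkey wrappers: fill in the algorithm-specific fields
 * (encryption/authentication algorithm IDs, digest size and authentication
 * control word) and hand off to the common sa_aead_setkey() above.
 */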
static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
				   const u8 *key, unsigned int keylen)
{
	struct algo_data ad = { 0 };

	ad.ealg_id = SA_EALG_ID_AES_CBC;
	ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
	ad.hash_size = SHA1_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;

	return sa_aead_setkey(authenc, key, keylen, &ad);
}

static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
				     const u8 *key, unsigned int keylen)
{
	struct algo_data ad = { 0 };

	ad.ealg_id = SA_EALG_ID_AES_CBC;
	ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
	ad.hash_size = SHA256_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;

	return sa_aead_setkey(authenc, key, keylen, &ad);
}
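
/*
 * Common AEAD request handler. The encryption region starts after the
 * associated data, while authentication covers assoclen + cryptlen; for
 * decryption the authentication tag is excluded from both sizes. Requests
 * larger than SA_MAX_DATA_SZ, or whose authenticated length falls inside
 * the SA_UNSAFE_DATA_SZ_MIN..SA_UNSAFE_DATA_SZ_MAX window, are handed to
 * the software fallback AEAD instead of the hardware engine.
 */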
static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
	struct sa_req sa_req = { 0 };
	size_t auth_size, enc_size;

	enc_size = req->cryptlen;
	auth_size = req->assoclen + req->cryptlen;

	if (!enc) {
		enc_size -= crypto_aead_authsize(tfm);
		auth_size -= crypto_aead_authsize(tfm);
	}

	if (auth_size > SA_MAX_DATA_SZ ||
	    (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
	     auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
		struct aead_request *subreq = aead_request_ctx(req);
		int ret;

		aead_request_set_tfm(subreq, ctx->fallback.aead);
		aead_request_set_callback(subreq, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(subreq, req->src, req->dst,
				       req->cryptlen, req->iv);
		aead_request_set_ad(subreq, req->assoclen);

		ret = enc ? crypto_aead_encrypt(subreq) :
			crypto_aead_decrypt(subreq);
		return ret;
	}

	sa_req.enc_offset = req->assoclen;
	sa_req.enc_size = enc_size;
	sa_req.auth_size = auth_size;
	sa_req.size = auth_size;
	sa_req.enc_iv = iv;
	sa_req.type = CRYPTO_ALG_TYPE_AEAD;
	sa_req.enc = enc;
	sa_req.callback = sa_aead_dma_in_callback;
	sa_req.mdata_size = 52;
	sa_req.base = &req->base;
	sa_req.ctx = ctx;
	sa_req.src = req->src;
	sa_req.dst = req->dst;

	return sa_run(&sa_req);
}

/* AEAD algorithm encrypt interface function */
static int sa_aead_encrypt(struct aead_request *req)
{
	return sa_aead_run(req, req->iv, 1);
}

/* AEAD algorithm decrypt interface function */
static int sa_aead_decrypt(struct aead_request *req)
{
	return sa_aead_run(req, req->iv, 0);
}
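
/*
 * Algorithms exposed by this driver: AES and 3DES-EDE skciphers,
 * SHA1/SHA256/SHA512 ahashes and AES-CBC based authenc() AEADs. All are
 * registered as asynchronous and fall back to software for request sizes
 * the hardware cannot handle.
 */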
static struct sa_alg_tmpl sa_algs[] = {
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = sa_aes_cbc_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(aes)",
			.base.cra_driver_name = "ecb-aes-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = sa_aes_ecb_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(des3_ede)",
			.base.cra_driver_name = "cbc-des3-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = 3 * DES_KEY_SIZE,
			.max_keysize = 3 * DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = sa_3des_cbc_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(des3_ede)",
			.base.cra_driver_name = "ecb-des3-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = 3 * DES_KEY_SIZE,
			.max_keysize = 3 * DES_KEY_SIZE,
			.setkey = sa_3des_ecb_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha1_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha1_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha256_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha256_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha512_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha512_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha1),cbc(aes))-sa2ul",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,

			.init = sa_cra_init_aead_sha1,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha1_setkey,
			.setauthsize = sa_aead_setauthsize,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha256),cbc(aes))-sa2ul",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,

			.init = sa_cra_init_aead_sha256,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha256_setkey,
			.setauthsize = sa_aead_setauthsize,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		},
	},
};
/* Register the algorithms in crypto framework */
static void sa_register_algos(const struct device *dev)
{
	char *alg_name;
	u32 type;
	int i, err;

	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
		type = sa_algs[i].type;
		if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
			alg_name = sa_algs[i].alg.skcipher.base.cra_name;
			err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
		} else if (type == CRYPTO_ALG_TYPE_AHASH) {
			alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
			err = crypto_register_ahash(&sa_algs[i].alg.ahash);
		} else if (type == CRYPTO_ALG_TYPE_AEAD) {
			alg_name = sa_algs[i].alg.aead.base.cra_name;
			err = crypto_register_aead(&sa_algs[i].alg.aead);
		} else {
			dev_err(dev,
				"un-supported crypto algorithm (%d)",
				sa_algs[i].type);
			continue;
		}

		if (err)
			dev_err(dev, "Failed to register '%s'\n", alg_name);
		else
			sa_algs[i].registered = true;
	}
}
/* Unregister the algorithms in crypto framework */
static void sa_unregister_algos(const struct device *dev)
{
	u32 type;
	int i;

	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
		type = sa_algs[i].type;
		if (!sa_algs[i].registered)
			continue;
		if (type == CRYPTO_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
		else if (type == CRYPTO_ALG_TYPE_AHASH)
			crypto_unregister_ahash(&sa_algs[i].alg.ahash);
		else if (type == CRYPTO_ALG_TYPE_AEAD)
			crypto_unregister_aead(&sa_algs[i].alg.aead);

		sa_algs[i].registered = false;
	}
}
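
/* Allocate the DMA pool that backs the per-context security context buffers */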
static int sa_init_mem(struct sa_crypto_data *dev_data)
{
	struct device *dev = &dev_data->pdev->dev;

	/* Setup dma pool for security context buffers */
	dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
					    SA_CTX_MAX_SZ, 64, 0);
	if (!dev_data->sc_pool) {
		dev_err(dev, "Failed to create dma pool");
		return -ENOMEM;
	}

	return 0;
}
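
/*
 * Request and configure the DMA channels used to move data to ("tx") and
 * from ("rx1"/"rx2") the SA2UL engine, with a 48-bit DMA mask and 4-byte
 * bus width on all channels.
 */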
static int sa_dma_init(struct sa_crypto_data *dd)
{
	int ret;
	struct dma_slave_config cfg;

	dd->dma_rx1 = NULL;
	dd->dma_tx = NULL;
	dd->dma_rx2 = NULL;

	ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
	if (ret)
		return ret;

	dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
	if (IS_ERR(dd->dma_rx1)) {
		if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
			dev_err(dd->dev, "Unable to request rx1 DMA channel\n");
		return PTR_ERR(dd->dma_rx1);
	}

	dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
	if (IS_ERR(dd->dma_rx2)) {
		dma_release_channel(dd->dma_rx1);
		if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER)
			dev_err(dd->dev, "Unable to request rx2 DMA channel\n");
		return PTR_ERR(dd->dma_rx2);
	}

	dd->dma_tx = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_tx)) {
		if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER)
			dev_err(dd->dev, "Unable to request tx DMA channel\n");
		ret = PTR_ERR(dd->dma_tx);
		goto err_dma_tx;
	}

	memzero_explicit(&cfg, sizeof(cfg));

	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 4;
	cfg.dst_maxburst = 4;

	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	return 0;

err_dma_tx:
	dma_release_channel(dd->dma_rx1);
	dma_release_channel(dd->dma_rx2);

	return ret;
}
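
/*
 * Helper for device_for_each_child() in probe: link each child device
 * populated from the DT node as a consumer of the SA2UL parent so the
 * driver core probes it only once the parent is bound.
 */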
static int sa_link_child(struct device *dev, void *data)
{
	struct device *parent = data;

	device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);

	return 0;
}
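
/*
 * Probe: enable runtime PM, set up the security-context DMA pool and the
 * DMA channels, map the MMIO region, enable the crypto sub-engines and
 * register the algorithms with the crypto framework.
 */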
static int sa_ul_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	static void __iomem *saul_base;
	struct sa_crypto_data *dev_data;
	u32 val;
	int ret;

	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	sa_k3_dev = dev;
	dev_data->dev = dev;
	dev_data->pdev = pdev;
	platform_set_drvdata(pdev, dev_data);
	dev_set_drvdata(sa_k3_dev, dev_data);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
			ret);
		return ret;
	}

	sa_init_mem(dev_data);
	ret = sa_dma_init(dev_data);
	if (ret)
		goto disable_pm_runtime;

	spin_lock_init(&dev_data->scid_lock);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	saul_base = devm_ioremap_resource(dev, res);

	dev_data->base = saul_base;
	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
	      SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
	      SA_EEC_TRNG_EN;

	writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);

	sa_register_algos(dev);

	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	if (ret)
		goto release_dma;

	device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);

	return 0;

release_dma:
	sa_unregister_algos(&pdev->dev);
	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
	dma_release_channel(dev_data->dma_tx);
	dma_pool_destroy(dev_data->sc_pool);

disable_pm_runtime:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return ret;
}
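
/* Tear down everything set up in probe: algorithms, DMA channels, DMA pool and PM state */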
static int sa_ul_remove(struct platform_device *pdev)
{
	struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);

	sa_unregister_algos(&pdev->dev);

	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
	dma_release_channel(dev_data->dma_tx);

	dma_pool_destroy(dev_data->sc_pool);

	platform_set_drvdata(pdev, NULL);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,j721e-sa2ul", },
	{ .compatible = "ti,am654-sa2ul", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver sa_ul_driver = {
	.probe = sa_ul_probe,
	.remove = sa_ul_remove,
	.driver = {
		.name = "saul-crypto",
		.of_match_table = of_match,
	},
};
module_platform_driver(sa_ul_driver);
MODULE_LICENSE("GPL v2");