// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 HiSilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY	64
#define SEC_REQ_LIMIT		SZ_32M

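/*
 * Per-algorithm configuration table: each entry holds the cipher
 * algorithm, block mode and key-length selectors that are later packed
 * into the hardware buffer descriptor (BD) words for that combination.
 */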
struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};

static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex used to ensure safe operation of reference count of
 * alg providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}

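/*
 * Walk a chain of hardware scatter-gather lists and return each block to
 * the device's DMA pool. A NULL head is a safe no-op.
 */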
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}

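/*
 * Build the chained hardware SGL the SEC engine consumes: DMA-pool blocks
 * of up to SEC_MAX_SGE_NUM entries each, linked through next_sgl, which
 * mirror the (already DMA-mapped) kernel scatterlist.
 */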
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info,
				     gfp_t gfp)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   gfp, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
	*psec_sgl = 0;

	return ret;
}

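/*
 * Common setkey path: on first use, allocate a DMA-coherent buffer whose
 * address goes into the BD template so the engine can fetch the key
 * directly; on rekey, zero the old key in place before copying the new
 * one via sec_alg_skcipher_init_context().
 */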
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

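/*
 * The per-mode setkey helpers below map the caller's key length to the
 * matching sec_c_alg_cfgs entry before handing off to the common path.
 */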
static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}

static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to hardware queue only under following circumstances
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain order)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue which
		 * is then emptied as requests complete
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}

err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}

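/*
 * Completion callback, invoked once per hardware BD: it propagates the
 * chaining IV for CBC/CTR, drains the software queue or backlog while the
 * queue lock is held, and completes the skcipher request once every
 * element of the original (possibly split) request has finished.
 */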
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. Error will be reported to the requester so
		 * it should be able to handle appropriately.
		 */
	}

	spin_lock_bh(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
		    backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			crypto_request_complete(backlog_req->req_base,
						-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	spin_unlock_bh(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance is needed as the lock is freed in the completion
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skcipher_request_complete(skreq, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}

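/*
 * Split a request into SEC_REQ_LIMIT (32 MiB) sized pieces. For example,
 * a 70 MiB request gives *steps = 3 with sizes {32 MiB, 32 MiB, 6 MiB};
 * only the final piece may be short.
 */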
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps, gfp_t gfp)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}

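/*
 * DMA-map the caller's scatterlist once, then carve it into 'steps'
 * sub-scatterlists with sg_split() so that each piece can be submitted
 * as an independent hardware request.
 */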
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev, gfp_t gfp)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), gfp);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* output the scatter list before and after this */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, gfp);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

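/*
 * Allocate one request element and fill its BD from the transform's
 * template: select encrypt/decrypt, spread the granule size across the
 * w0/w2 bit fields, and attach hardware SGLs for input and, if the
 * destination differs, output.
 */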
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info, gfp_t gfp)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), gfp);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info, gfp);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info, gfp);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}

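/*
 * Top-level crypto path: split the request into SEC_REQ_LIMIT chunks,
 * DMA-map and split the scatterlists, build one request element per
 * chunk, then submit the whole set atomically to the hardware queue,
 * the software queue, or the backlog.
 */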
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;
	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps, gfp);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev, gfp);
	if (ret)
		goto err_free_split_sizes;
	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev, gfp);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in sec_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer
	 * but in the case where we know there is no chaining we can
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info, gfp);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully cleanup after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	spin_lock_bh(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog. If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) > steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			spin_unlock_bh(&queue->queuelock);
			goto out;
		}

		spin_unlock_bh(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	spin_unlock_bh(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);

	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_BIDIRECTIONAL);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	spin_lock_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}
	sec_queue_stop_release(ctx->queue);
}

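/*
 * Chained modes (CBC, CTR) need in-order completion, so their transforms
 * additionally get a 512-entry software queue that buffers requests while
 * earlier ones are still in flight.
 */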
static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}

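/*
 * The algorithms below are reached through the generic skcipher API. A
 * minimal consumer sketch (illustrative only, error handling omitted):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_crypt(req, src_sgl, dst_sgl, len, iv);
 *	crypto_skcipher_encrypt(req);
 *
 * The crypto core selects these hisi_sec_* implementations when their
 * cra_priority wins over other providers of the same cra_name.
 */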
static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};

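/*
 * Registration is reference counted across SEC devices: only the first
 * device to come up registers the algorithms with the crypto core, and
 * only the last one down unregisters them.
 */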
int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
unlock:
	mutex_unlock(&algs_lock);
}