/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
struct virtio_crypto_ablkcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	uint8_t *iv;

	/* Encryption? */
	bool encrypt;
};
/*
 * The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithms registration.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err);
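
/*
 * Data-virtqueue completion callback for symmetric requests: translate the
 * status written back by the device into an errno and finalize the
 * ablkcipher request on the crypto engine.
 */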
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct ablkcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->ablkcipher_req;
		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
						      ablk_req, error);
	}
}
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
			key_len);
		return -EINVAL;
	}
	return 0;
}
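
/*
 * Create a cipher session on the device: a CIPHER_CREATE_SESSION control
 * request (ctrl header and key as out buffers, status/session_id as the in
 * buffer) is placed on the control virtqueue, the function busy-waits for
 * the device to complete it, then records the returned session id in @ctx.
 */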
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid DMA from the stack: use a dynamically allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	memcpy(cipher_key, key, keylen);

	spin_lock(&vcrypto->ctrl_lock);
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * Trapping into the hypervisor, so the request should be
	 * handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}
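
/*
 * Destroy the encryption or decryption session whose id is stored in @ctx,
 * using the control virtqueue in the same synchronous, busy-wait fashion
 * as session creation.
 */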
static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
			num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			vcrypto->ctrl_status.status,
			destroy_session->session_id);

		return -EINVAL;
	}
	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}
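
/*
 * Validate the key length and create the pair of device sessions (one for
 * encryption, one for decryption) used by this transform.  If the second
 * session cannot be created, the first one is torn down again.
 */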
static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/* Note: kernel crypto API realization */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				      virtcrypto_get_dev_node(node);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
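
/*
 * Build and submit one symmetric crypto request on a data virtqueue.  The
 * scatterlist layout handed to virtqueue_add_sgs() is:
 *   out: op header (req_data), IV, source data
 *   in:  destination data, status byte
 * which is why sg_total below is src_nents + dst_nents + 3.
 */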
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid DMA from the stack: use a dynamically allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}
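
/*
 * ablkcipher .encrypt entry point: fill in the per-request context and hand
 * the request to the crypto engine of the chosen data virtqueue (the first
 * one is used as the default here).
 */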
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
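
/*
 * Transform init: reserve per-request context space and wire up the crypto
 * engine callbacks; the device and sessions are set up lazily in setkey().
 */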
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}
static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}
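
/*
 * crypto engine do_one_request() callback: actually build and submit the
 * queued request on its data virtqueue.
 */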
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}
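
/*
 * Complete a request on the crypto engine and release the per-request
 * resources (the IV copy and, via virtcrypto_clear_request(), the request
 * data and scatterlist array).
 */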
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
					   req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}
static struct crypto_alg virtio_crypto_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "virtio_crypto_aes_cbc",
	.cra_priority = 150,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_type = &crypto_ablkcipher_type,
	.cra_init = virtio_crypto_ablkcipher_init,
	.cra_exit = virtio_crypto_ablkcipher_exit,
	.cra_u = {
	   .ablkcipher = {
			.setkey = virtio_crypto_ablkcipher_setkey,
			.decrypt = virtio_crypto_ablkcipher_decrypt,
			.encrypt = virtio_crypto_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
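
/*
 * The algorithm is registered only once, when the first virtio crypto device
 * appears, and unregistered when the last one goes away.  Once registered,
 * kernel users reach it through the regular crypto API by name; a minimal
 * sketch (not part of this driver, era-appropriate ablkcipher API assumed):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 * where the "virtio_crypto_aes_cbc" implementation is selected according to
 * its cra_priority.
 */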
int virtio_crypto_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++virtio_crypto_active_devs != 1)
		goto unlock;

	ret = crypto_register_algs(virtio_crypto_algs,
			ARRAY_SIZE(virtio_crypto_algs));
	if (ret)
		virtio_crypto_active_devs--;

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}
void virtio_crypto_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--virtio_crypto_active_devs != 0)
		goto unlock;

	crypto_unregister_algs(virtio_crypto_algs,
			ARRAY_SIZE(virtio_crypto_algs));

unlock:
	mutex_unlock(&algs_lock);
}