// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013, 2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};

#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static DEFINE_SPINLOCK(req_queue_lock);
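
/* Tracking structure that ties a queued ccp_cmd to the crypto API request
 * that generated it while the cmd sits on the request queue.
 */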
struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};
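
/* A return code counts as success if the cmd completed without error or
 * was accepted for asynchronous processing (-EINPROGRESS) or was
 * backlogged (-EBUSY).
 */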
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}
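
/* Remove a completed cmd from the request queue.  Returns the next held
 * cmd with a matching tfm that can now be submitted and, through @backlog,
 * the next backlogged cmd whose owner should be told processing has begun.
 */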
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}
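
/* Completion callback invoked by the CCP driver when a queued cmd finishes.
 * Reports the result to the crypto API and submits the next held cmd for
 * the same tfm, if any.
 */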
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}
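
/* Add a cmd to the request queue and, if no other cmd for the same tfm is
 * pending, submit it to the CCP immediately.  Returns -EINPROGRESS or -EBUSY
 * once the cmd has been queued; on any other error the cmd is freed before
 * returning.
 */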
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
			ret = -ENOSPC;
			goto e_lock;
		}
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}
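
/* Append the entries of @sg_add to the first unused slots of @table's
 * scatterlist.  Returns the last entry that was filled in, or NULL if
 * @table does not have enough free entries.
 */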
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	if (WARN_ON(!sg))
		return NULL;

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	if (WARN_ON(sg_add))
		return NULL;

	return sg_last;
}
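
/* Register the CCP-backed algorithm implementations with the crypto API,
 * honoring the per-family disable module parameters.
 */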
static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_aeads(&aead_algs);
		if (ret)
			return ret;
	}

	if (!des3_disable) {
		ret = ccp_register_des3_algs(&skcipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	if (!rsa_disable) {
		ret = ccp_register_rsa_algs(&akcipher_algs);
		if (ret)
			return ret;
	}

	return 0;
}
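
/* Unregister and free every algorithm that was added to the per-type lists
 * at registration time.
 */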
static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
	struct ccp_crypto_aead *aead_alg, *aead_tmp;
	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}

	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
		crypto_unregister_aead(&aead_alg->alg);
		list_del(&aead_alg->entry);
		kfree(aead_alg);
	}

	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
		crypto_unregister_akcipher(&akc_alg->alg);
		list_del(&akc_alg->entry);
		kfree(akc_alg);
	}
}
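
/* Module init: bail out if no CCP device is present, then set up the
 * request queue and register the supported algorithms.
 */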
static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret) {
		pr_err("Cannot load: there are no available CCPs\n");
		return ret;
	}

	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}

static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);