Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.15:

  API:
   - Disambiguate EBUSY when queueing crypto request by adding ENOSPC.
     This change touches code outside the crypto API.
   - Reset settings when empty string is written to rng_current.

  Algorithms:
   - Add OSCCA SM3 secure hash.

  Drivers:
   - Remove old mv_cesa driver (replaced by marvell/cesa).
   - Enable rfc3686/ecb/cfb/ofb AES in crypto4xx.
   - Add ccm/gcm AES in crypto4xx.
   - Add support for BCM7278 in iproc-rng200.
   - Add hash support on Exynos in s5p-sss.
   - Fix fallback-induced error in vmx.
   - Fix output IV in atmel-aes.
   - Fix empty GCM hash in mediatek.

  Others:
   - Fix DoS potential in lib/mpi.
   - Fix potential out-of-order issues with padata."

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (162 commits)
  lib/mpi: call cond_resched() from mpi_powm() loop
  crypto: stm32/hash - Fix return issue on update
  crypto: dh - Remove pointless checks for NULL 'p' and 'g'
  crypto: qat - Clean up error handling in qat_dh_set_secret()
  crypto: dh - Don't permit 'key' or 'g' size longer than 'p'
  crypto: dh - Don't permit 'p' to be 0
  crypto: dh - Fix double free of ctx->p
  hwrng: iproc-rng200 - Add support for BCM7278
  dt-bindings: rng: Document BCM7278 RNG200 compatible
  crypto: chcr - Replace _manual_ swap with swap macro
  crypto: marvell - Add a NULL entry at the end of mv_cesa_plat_id_table[]
  hwrng: virtio - Virtio RNG devices need to be re-registered after suspend/resume
  crypto: atmel - remove empty functions
  crypto: ecdh - remove empty exit()
  MAINTAINERS: update maintainer for qat
  crypto: caam - remove unused param of ctx_map_to_sec4_sg()
  crypto: caam - remove unneeded edesc zeroization
  crypto: atmel-aes - Reset the controller before each use
  crypto: atmel-aes - properly set IV after {en,de}crypt
  hwrng: core - Reset user selected rng by writing "" to rng_current
  ...
commit 37dc79565c
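The common thread in the conversions below is the new crypto_wait helper set: a caller packs a completion into a struct crypto_wait, registers crypto_req_done() as the request callback, and lets crypto_wait_req() absorb the -EINPROGRESS/-EBUSY handling. A minimal caller-side sketch, condensed from the api-samples.rst hunk that follows (the wrapper function itself is illustrative, not part of this merge):

    #include <crypto/skcipher.h>
    #include <linux/crypto.h>

    /* Sketch: run one skcipher request synchronously with the new helpers. */
    static int example_encrypt_one_block(struct crypto_skcipher *tfm,
                                         struct scatterlist *sg, u8 *iv)
    {
        DECLARE_CRYPTO_WAIT(wait);   /* replaces the open-coded completion */
        struct skcipher_request *req;
        int rc;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
            return -ENOMEM;

        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, sg, sg, 16, iv);

        /* crypto_wait_req() folds -EINPROGRESS/-EBUSY waiting into one call */
        rc = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
        return rc;
    }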
@@ -7,59 +7,27 @@ Code Example For Symmetric Key Cipher Operation
 
 ::
 
-    struct tcrypt_result {
-        struct completion completion;
-        int err;
-    };
-
     /* tie all data structures together */
     struct skcipher_def {
         struct scatterlist sg;
         struct crypto_skcipher *tfm;
         struct skcipher_request *req;
-        struct tcrypt_result result;
+        struct crypto_wait wait;
     };
 
-    /* Callback function */
-    static void test_skcipher_cb(struct crypto_async_request *req, int error)
-    {
-        struct tcrypt_result *result = req->data;
-
-        if (error == -EINPROGRESS)
-            return;
-        result->err = error;
-        complete(&result->completion);
-        pr_info("Encryption finished successfully\n");
-    }
-
     /* Perform cipher operation */
     static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
                          int enc)
     {
-        int rc = 0;
+        int rc;
 
         if (enc)
-            rc = crypto_skcipher_encrypt(sk->req);
+            rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait);
         else
-            rc = crypto_skcipher_decrypt(sk->req);
+            rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait);
 
-        switch (rc) {
-        case 0:
-            break;
-        case -EINPROGRESS:
-        case -EBUSY:
-            rc = wait_for_completion_interruptible(
-                &sk->result.completion);
-            if (!rc && !sk->result.err) {
-                reinit_completion(&sk->result.completion);
-                break;
-            }
-        default:
-            pr_info("skcipher encrypt returned with %d result %d\n",
-                rc, sk->result.err);
-            break;
-        }
-        init_completion(&sk->result.completion);
+        if (rc)
+            pr_info("skcipher encrypt returned with result %d\n", rc);
 
         return rc;
     }
@@ -89,8 +57,8 @@ Code Example For Symmetric Key Cipher Operation
     }
 
     skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                      test_skcipher_cb,
-                      &sk.result);
+                      crypto_req_done,
+                      &sk.wait);
 
     /* AES 256 with random key */
     get_random_bytes(&key, 32);
@@ -122,7 +90,7 @@ Code Example For Symmetric Key Cipher Operation
     /* We encrypt one block */
     sg_init_one(&sk.sg, scratchpad, 16);
    skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata);
-    init_completion(&sk.result.completion);
+    crypto_init_wait(&sk.wait);
 
     /* encrypt data */
     ret = test_skcipher_encdec(&sk, 1);
@@ -1,7 +1,9 @@
 HWRNG support for the iproc-rng200 driver
 
 Required properties:
-- compatible : "brcm,iproc-rng200"
+- compatible : Must be one of:
+    "brcm,bcm7278-rng200"
+    "brcm,iproc-rng200"
 - reg : base address and size of control register block
 
 Example:
@@ -5484,7 +5484,7 @@ F: include/uapi/linux/fb.h
 
 FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
 M: Horia Geantă <horia.geanta@nxp.com>
-M: Dan Douglass <dan.douglass@nxp.com>
+M: Aymen Sghaier <aymen.sghaier@nxp.com>
 L: linux-crypto@vger.kernel.org
 S: Maintained
 F: drivers/crypto/caam/
@@ -11060,7 +11060,6 @@ F: drivers/mtd/nand/pxa3xx_nand.c
 
 QAT DRIVER
 M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
-M: Salvatore Benedetto <salvatore.benedetto@intel.com>
 L: qat-linux@intel.com
 S: Supported
 F: drivers/crypto/qat/
@@ -11793,7 +11792,7 @@ L: linux-crypto@vger.kernel.org
 L: linux-samsung-soc@vger.kernel.org
 S: Maintained
 F: drivers/crypto/exynos-rng.c
-F: Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
+F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
 
 SAMSUNG FRAMEBUFFER DRIVER
 M: Jingoo Han <jingoohan1@gmail.com>
@@ -140,6 +140,6 @@ CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_DEFLATE=y
 CONFIG_CRYPTO_LZO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
@@ -279,6 +279,6 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
 CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
@@ -163,5 +163,5 @@ CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_T10DIF=y
@@ -28,6 +28,7 @@
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <crypto/b128ops.h>
+#include <crypto/gcm.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
 #include <asm/fpu/api.h>
@@ -1067,9 +1068,10 @@ static struct skcipher_alg aesni_skciphers[] = {
     }
 };
 
+static
 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
 
-struct {
+static struct {
     const char *algname;
     const char *drvname;
    const char *basename;
@@ -1131,7 +1133,7 @@ static struct aead_alg aesni_aead_algs[] = { {
     .setauthsize        = common_rfc4106_set_authsize,
     .encrypt        = helper_rfc4106_encrypt,
     .decrypt        = helper_rfc4106_decrypt,
-    .ivsize            = 8,
+    .ivsize            = GCM_RFC4106_IV_SIZE,
     .maxauthsize        = 16,
     .base = {
         .cra_name        = "__gcm-aes-aesni",
@@ -1149,7 +1151,7 @@ static struct aead_alg aesni_aead_algs[] = { {
     .setauthsize        = rfc4106_set_authsize,
     .encrypt        = rfc4106_encrypt,
     .decrypt        = rfc4106_decrypt,
-    .ivsize            = 8,
+    .ivsize            = GCM_RFC4106_IV_SIZE,
     .maxauthsize        = 16,
     .base = {
         .cra_name        = "rfc4106(gcm(aes))",
@@ -1165,7 +1167,7 @@ static struct aead_alg aesni_aead_algs[] = { {
     .setauthsize        = generic_gcmaes_set_authsize,
     .encrypt        = generic_gcmaes_encrypt,
     .decrypt        = generic_gcmaes_decrypt,
-    .ivsize            = 12,
+    .ivsize            = GCM_AES_IV_SIZE,
     .maxauthsize        = 16,
     .base = {
         .cra_name        = "gcm(aes)",
@@ -41,6 +41,7 @@
 #include <asm/inst.h>
 
 
+.section .rodata
 .align 16
 /*
  * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
@@ -111,19 +112,13 @@ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
     pxor    CONSTANT, %xmm1
     sub     $0x40, LEN
     add     $0x40, BUF
-#ifndef __x86_64__
-    /* This is for position independent code(-fPIC) support for 32bit */
-    call    delta
-delta:
-    pop     %ecx
-#endif
     cmp     $0x40, LEN
     jb      less_64
 
 #ifdef __x86_64__
     movdqa .Lconstant_R2R1(%rip), CONSTANT
 #else
-    movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT
+    movdqa .Lconstant_R2R1, CONSTANT
 #endif
 
 loop_64:/* 64 bytes Full cache line folding */
@@ -172,7 +167,7 @@ less_64:/* Folding cache line into 128bit */
 #ifdef __x86_64__
     movdqa  .Lconstant_R4R3(%rip), CONSTANT
 #else
-    movdqa  .Lconstant_R4R3 - delta(%ecx), CONSTANT
+    movdqa  .Lconstant_R4R3, CONSTANT
 #endif
     prefetchnta     (BUF)
 
@@ -220,8 +215,8 @@ fold_64:
     movdqa  .Lconstant_R5(%rip), CONSTANT
     movdqa  .Lconstant_mask32(%rip), %xmm3
 #else
-    movdqa  .Lconstant_R5 - delta(%ecx), CONSTANT
-    movdqa  .Lconstant_mask32 - delta(%ecx), %xmm3
+    movdqa  .Lconstant_R5, CONSTANT
+    movdqa  .Lconstant_mask32, %xmm3
 #endif
     psrldq  $0x04, %xmm2
     pand    %xmm3, %xmm1
@@ -232,7 +227,7 @@ fold_64:
 #ifdef __x86_64__
     movdqa  .Lconstant_RUpoly(%rip), CONSTANT
 #else
-    movdqa  .Lconstant_RUpoly - delta(%ecx), CONSTANT
+    movdqa  .Lconstant_RUpoly, CONSTANT
 #endif
     movdqa  %xmm1, %xmm2
     pand    %xmm3, %xmm1
@@ -860,6 +860,17 @@ config CRYPTO_SHA3
       References:
       http://keccak.noekeon.org/
 
+config CRYPTO_SM3
+    tristate "SM3 digest algorithm"
+    select CRYPTO_HASH
+    help
+      SM3 secure hash function as defined by OSCCA GM/T 0004-2012 (SM3).
+      It is part of the Chinese Commercial Cryptography suite.
+
+      References:
+      http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
+      https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
+
 config CRYPTO_TGR192
     tristate "Tiger digest algorithms"
     select CRYPTO_HASH
@@ -71,6 +71,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
 obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
+obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
 CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
@@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 }
 EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
 
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
-{
-    switch (err) {
-    case -EINPROGRESS:
-    case -EBUSY:
-        wait_for_completion(&completion->completion);
-        reinit_completion(&completion->completion);
-        err = completion->err;
-        break;
-    };
-
-    return err;
-}
-EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
-
-void af_alg_complete(struct crypto_async_request *req, int err)
-{
-    struct af_alg_completion *completion = req->data;
-
-    if (err == -EINPROGRESS)
-        return;
-
-    completion->err = err;
-    complete(&completion->completion);
-}
-EXPORT_SYMBOL_GPL(af_alg_complete);
-
 /**
  * af_alg_alloc_tsgl - allocate the TX SGL
  *
@@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req,
         return err;
 
     err = op(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && (ahash_request_flags(req) &
-                   CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return err;
 
     ahash_restore_req(req, err);
@@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
     req->base.complete = ahash_def_finup_done2;
 
     err = crypto_ahash_reqtfm(req)->final(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && (ahash_request_flags(req) &
-                   CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return err;
 
 out:
@@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req)
         return err;
 
     err = tfm->update(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && (ahash_request_flags(req) &
-                   CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return err;
 
     return ahash_def_finup_finish1(req, err);
@@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue,
     int err = -EINPROGRESS;
 
     if (unlikely(queue->qlen >= queue->max_qlen)) {
-        err = -EBUSY;
-        if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+        if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+            err = -ENOSPC;
             goto out;
+        }
+        err = -EBUSY;
         if (queue->backlog == &queue->list)
             queue->backlog = &request->list;
     }
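After this hunk, a full queue no longer reports -EBUSY for both the backlogged and the rejected case. A hedged sketch of what a dispatching driver now sees (the helper is illustrative; the real consumer conversion is the cryptd hunk further down, which tests for -ENOSPC):

    #include <crypto/algapi.h>

    /* Illustrative dispatcher around a driver-owned crypto_queue. */
    static int example_enqueue(struct crypto_queue *queue,
                               struct crypto_async_request *request)
    {
        int err = crypto_enqueue_request(queue, request);

        if (err == -ENOSPC)
            return err;   /* queue full and request may not backlog */

        /*
         * -EINPROGRESS: queued normally.
         * -EBUSY: queue full, but CRYPTO_TFM_REQ_MAY_BACKLOG was set and
         * the request went onto the backlog. Either way the queue owns
         * the request now; kick processing here (driver-specific,
         * e.g. a queue_work() call).
         */
        return err;
    }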
@@ -122,7 +122,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
             int notnum = 0;
 
             name = ++p;
-            len = 0;
 
             for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
                 notnum |= !isdigit(*p);
@@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
         /* Synchronous operation */
         aead_request_set_callback(&areq->cra_u.aead_req,
                       CRYPTO_TFM_REQ_MAY_BACKLOG,
-                      af_alg_complete, &ctx->completion);
-        err = af_alg_wait_for_completion(ctx->enc ?
+                      crypto_req_done, &ctx->wait);
+        err = crypto_wait_req(ctx->enc ?
                 crypto_aead_encrypt(&areq->cra_u.aead_req) :
                 crypto_aead_decrypt(&areq->cra_u.aead_req),
-                &ctx->completion);
+                &ctx->wait);
     }
 
     /* AIO operation in progress */
@@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
     ctx->merge = 0;
     ctx->enc = 0;
     ctx->aead_assoclen = 0;
-    af_alg_init_completion(&ctx->completion);
+    crypto_init_wait(&ctx->wait);
 
     ask->private = ctx;
 
@@ -26,7 +26,7 @@ struct hash_ctx {
 
     u8 *result;
 
-    struct af_alg_completion completion;
+    struct crypto_wait wait;
 
     unsigned int len;
     bool more;
@@ -88,8 +87,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
         if ((msg->msg_flags & MSG_MORE))
             hash_free_result(sk, ctx);
 
-        err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
-                        &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
         if (err)
             goto unlock;
     }
@@ -110,8 +109,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 
         ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
 
-        err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
-                         &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_update(&ctx->req),
+                      &ctx->wait);
         af_alg_free_sg(&ctx->sgl);
         if (err)
             goto unlock;
@@ -129,8 +128,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
             goto unlock;
 
         ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
-        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
-                         &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+                      &ctx->wait);
     }
 
 unlock:
@@ -171,7 +170,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
     } else {
         if (!ctx->more) {
             err = crypto_ahash_init(&ctx->req);
-            err = af_alg_wait_for_completion(err, &ctx->completion);
+            err = crypto_wait_req(err, &ctx->wait);
             if (err)
                 goto unlock;
         }
@@ -179,7 +178,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
         err = crypto_ahash_update(&ctx->req);
     }
 
-    err = af_alg_wait_for_completion(err, &ctx->completion);
+    err = crypto_wait_req(err, &ctx->wait);
     if (err)
         goto unlock;
 
@@ -215,17 +214,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
     ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 
     if (!result && !ctx->more) {
-        err = af_alg_wait_for_completion(
-                crypto_ahash_init(&ctx->req),
-                &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_init(&ctx->req),
+                      &ctx->wait);
         if (err)
             goto unlock;
     }
 
     if (!result || ctx->more) {
         ctx->more = 0;
-        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
-                         &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+                      &ctx->wait);
         if (err)
             goto unlock;
     }
@@ -476,13 +474,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
     ctx->result = NULL;
     ctx->len = len;
     ctx->more = 0;
-    af_alg_init_completion(&ctx->completion);
+    crypto_init_wait(&ctx->wait);
 
     ask->private = ctx;
 
     ahash_request_set_tfm(&ctx->req, hash);
     ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                   af_alg_complete, &ctx->completion);
+                   crypto_req_done, &ctx->wait);
 
     sk->sk_destruct = hash_sock_destruct;
 
@@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
         skcipher_request_set_callback(&areq->cra_u.skcipher_req,
                           CRYPTO_TFM_REQ_MAY_SLEEP |
                           CRYPTO_TFM_REQ_MAY_BACKLOG,
-                          af_alg_complete,
-                          &ctx->completion);
-        err = af_alg_wait_for_completion(ctx->enc ?
+                          crypto_req_done, &ctx->wait);
+        err = crypto_wait_req(ctx->enc ?
             crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
             crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
-                         &ctx->completion);
+                         &ctx->wait);
     }
 
     /* AIO operation in progress */
@@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
     ctx->more = 0;
     ctx->merge = 0;
     ctx->enc = 0;
-    af_alg_init_completion(&ctx->completion);
+    crypto_init_wait(&ctx->wait);
 
     ask->private = ctx;
 
crypto/api.c | 13
@@ -24,6 +24,7 @@
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/completion.h>
 #include "internal.h"
 
 LIST_HEAD(crypto_alg_list);
@@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(crypto_has_alg);
 
+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+    struct crypto_wait *wait = req->data;
+
+    if (err == -EINPROGRESS)
+        return;
+
+    wait->err = err;
+    complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
+
 MODULE_DESCRIPTION("Cryptographic core API");
 MODULE_LICENSE("GPL");
@@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3)
     public_key_signature_free(payload3);
 }
 
-struct public_key_completion {
-    struct completion completion;
-    int err;
-};
-
-static void public_key_verify_done(struct crypto_async_request *req, int err)
-{
-    struct public_key_completion *compl = req->data;
-
-    if (err == -EINPROGRESS)
-        return;
-
-    compl->err = err;
-    complete(&compl->completion);
-}
-
 /*
  * Verify a signature using a public key.
  */
 int public_key_verify_signature(const struct public_key *pkey,
                 const struct public_key_signature *sig)
 {
-    struct public_key_completion compl;
+    struct crypto_wait cwait;
     struct crypto_akcipher *tfm;
     struct akcipher_request *req;
     struct scatterlist sig_sg, digest_sg;
@@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey,
     sg_init_one(&digest_sg, output, outlen);
     akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
                    outlen);
-    init_completion(&compl.completion);
+    crypto_init_wait(&cwait);
     akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP,
-                      public_key_verify_done, &compl);
+                      crypto_req_done, &cwait);
 
     /* Perform the verification calculation.  This doesn't actually do the
      * verification, but rather calculates the hash expected by the
      * signature and returns that to us.
      */
-    ret = crypto_akcipher_verify(req);
-    if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
-        wait_for_completion(&compl.completion);
-        ret = compl.err;
-    }
+    ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
     if (ret < 0)
         goto out_free_output;
 
@@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
     int cpu, err;
     struct cryptd_cpu_queue *cpu_queue;
     atomic_t *refcnt;
-    bool may_backlog;
 
     cpu = get_cpu();
     cpu_queue = this_cpu_ptr(queue->cpu_queue);
     err = crypto_enqueue_request(&cpu_queue->queue, request);
 
     refcnt = crypto_tfm_ctx(request->tfm);
-    may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
 
-    if (err == -EBUSY && !may_backlog)
+    if (err == -ENOSPC)
         goto out_put_cpu;
 
     queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
@@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
         goto out;
 
     err = cts_cbc_encrypt(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return;
 
 out:
@@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
         goto out;
 
     err = cts_cbc_decrypt(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return;
 
 out:
crypto/dh.c | 36
@@ -21,19 +21,12 @@ struct dh_ctx {
     MPI xa;
 };
 
-static inline void dh_clear_params(struct dh_ctx *ctx)
+static void dh_clear_ctx(struct dh_ctx *ctx)
 {
     mpi_free(ctx->p);
     mpi_free(ctx->g);
-    ctx->p = NULL;
-    ctx->g = NULL;
-}
-
-static void dh_free_ctx(struct dh_ctx *ctx)
-{
-    dh_clear_params(ctx);
     mpi_free(ctx->xa);
-    ctx->xa = NULL;
+    memset(ctx, 0, sizeof(*ctx));
 }
 
 /*
@@ -60,9 +53,6 @@ static int dh_check_params_length(unsigned int p_len)
 
 static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
 {
-    if (unlikely(!params->p || !params->g))
-        return -EINVAL;
-
     if (dh_check_params_length(params->p_size << 3))
         return -EINVAL;
 
@@ -71,10 +61,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
         return -EINVAL;
 
     ctx->g = mpi_read_raw_data(params->g, params->g_size);
-    if (!ctx->g) {
-        mpi_free(ctx->p);
+    if (!ctx->g)
         return -EINVAL;
-    }
 
     return 0;
 }
@@ -86,21 +74,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
     struct dh params;
 
     /* Free the old MPI key if any */
-    dh_free_ctx(ctx);
+    dh_clear_ctx(ctx);
 
     if (crypto_dh_decode_key(buf, len, &params) < 0)
-        return -EINVAL;
+        goto err_clear_ctx;
 
     if (dh_set_params(ctx, &params) < 0)
-        return -EINVAL;
+        goto err_clear_ctx;
 
     ctx->xa = mpi_read_raw_data(params.key, params.key_size);
-    if (!ctx->xa) {
-        dh_clear_params(ctx);
-        return -EINVAL;
-    }
+    if (!ctx->xa)
+        goto err_clear_ctx;
 
     return 0;
+
+err_clear_ctx:
+    dh_clear_ctx(ctx);
+    return -EINVAL;
 }
 
 static int dh_compute_value(struct kpp_request *req)
@@ -158,7 +148,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm)
 {
     struct dh_ctx *ctx = dh_get_ctx(tfm);
 
-    dh_free_ctx(ctx);
+    dh_clear_ctx(ctx);
 }
 
 static struct kpp_alg dh = {
@@ -28,12 +28,12 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
     return src + size;
 }
 
-static inline int dh_data_size(const struct dh *p)
+static inline unsigned int dh_data_size(const struct dh *p)
 {
     return p->key_size + p->p_size + p->g_size;
 }
 
-int crypto_dh_key_len(const struct dh *p)
+unsigned int crypto_dh_key_len(const struct dh *p)
 {
     return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
 }
@@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
     if (secret.len != crypto_dh_key_len(params))
         return -EINVAL;
 
+    /*
+     * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
+     * some drivers assume otherwise.
+     */
+    if (params->key_size > params->p_size ||
+        params->g_size > params->p_size)
+        return -EINVAL;
+
     /* Don't allocate memory. Set pointers to data within
      * the given buffer
      */
@@ -90,6 +98,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
     params->p = (void *)(ptr + params->key_size);
     params->g = (void *)(ptr + params->key_size + params->p_size);
 
+    /*
+     * Don't permit 'p' to be 0.  It's not a prime number, and it's subject
+     * to corner cases such as 'mod 0' being undefined or
+     * crypto_kpp_maxsize() returning 0.
+     */
+    if (memchr_inv(params->p, 0, params->p_size) == NULL)
+        return -EINVAL;
+
     return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
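Both new checks act on the packed buffer before any MPI allocation. Judging from the pointer arithmetic above, the layout is [secret header | key | p | g], and memchr_inv() returns non-NULL as soon as one byte differs from the given value; a hedged restatement of the two rejections (illustrative helper, not in the patch):

    #include <crypto/dh.h>
    #include <linux/string.h>

    /* Illustrative only: mirrors the two new sanity checks above. */
    static bool dh_params_acceptable(const struct dh *params)
    {
        /* 'key' and 'g' must not be longer than the modulus 'p' */
        if (params->key_size > params->p_size ||
            params->g_size > params->p_size)
            return false;
        /* memchr_inv() == NULL means every byte is zero, i.e. p == 0 */
        return memchr_inv(params->p, 0, params->p_size) != NULL;
    }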
@@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
     return 0;
 }
 
-static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
-{
-    struct drbg_state *drbg = req->data;
-
-    if (error == -EINPROGRESS)
-        return;
-    drbg->ctr_async_err = error;
-    complete(&drbg->ctr_completion);
-}
-
 static int drbg_init_sym_kernel(struct drbg_state *drbg)
 {
     struct crypto_cipher *tfm;
@@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
         return PTR_ERR(sk_tfm);
     }
     drbg->ctr_handle = sk_tfm;
-    init_completion(&drbg->ctr_completion);
+    crypto_init_wait(&drbg->ctr_wait);
 
     req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
     if (!req) {
@@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
         return -ENOMEM;
     }
     drbg->ctr_req = req;
-    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                    drbg_skcipher_cb, drbg);
+    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+                    CRYPTO_TFM_REQ_MAY_SLEEP,
+                    crypto_req_done, &drbg->ctr_wait);
 
     alignmask = crypto_skcipher_alignmask(sk_tfm);
     drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
@@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
         /* Output buffer may not be valid for SGL, use scratchpad */
         skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
                        cryptlen, drbg->V);
-        ret = crypto_skcipher_encrypt(drbg->ctr_req);
-        switch (ret) {
-        case 0:
-            break;
-        case -EINPROGRESS:
-        case -EBUSY:
-            wait_for_completion(&drbg->ctr_completion);
-            if (!drbg->ctr_async_err) {
-                reinit_completion(&drbg->ctr_completion);
-                break;
-            }
-        default:
+        ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
+                      &drbg->ctr_wait);
+        if (ret)
             goto out;
-        }
-        init_completion(&drbg->ctr_completion);
+
+        crypto_init_wait(&drbg->ctr_wait);
 
         memcpy(outbuf, drbg->outscratchpad, cryptlen);
 
@@ -131,17 +131,11 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
     return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
 }
 
-static void no_exit_tfm(struct crypto_kpp *tfm)
-{
-    return;
-}
-
 static struct kpp_alg ecdh = {
     .set_secret = ecdh_set_secret,
     .generate_public_key = ecdh_compute_value,
     .compute_shared_secret = ecdh_compute_value,
     .max_size = ecdh_max_size,
-    .exit = no_exit_tfm,
     .base = {
         .cra_name = "ecdh",
         .cra_driver_name = "ecdh-generic",
@@ -28,7 +28,7 @@ static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
     return src + sz;
 }
 
-int crypto_ecdh_key_len(const struct ecdh *params)
+unsigned int crypto_ecdh_key_len(const struct ecdh *params)
 {
     return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
 }
crypto/gcm.c | 55
@@ -14,9 +14,9 @@
 #include <crypto/internal/hash.h>
 #include <crypto/null.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/gcm.h>
 #include <crypto/hash.h>
 #include "internal.h"
-#include <linux/completion.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -78,11 +78,6 @@ struct crypto_gcm_req_priv_ctx {
     } u;
 };
 
-struct crypto_gcm_setkey_result {
-    int err;
-    struct completion completion;
-};
-
 static struct {
     u8 buf[16];
     struct scatterlist sg;
@@ -98,17 +93,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
     return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
 }
 
-static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
-    struct crypto_gcm_setkey_result *result = req->data;
-
-    if (err == -EINPROGRESS)
-        return;
-
-    result->err = err;
-    complete(&result->completion);
-}
-
 static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
                  unsigned int keylen)
 {
@@ -119,7 +103,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
         be128 hash;
         u8 iv[16];
 
-        struct crypto_gcm_setkey_result result;
+        struct crypto_wait wait;
 
         struct scatterlist sg[1];
         struct skcipher_request req;
@@ -140,21 +124,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
     if (!data)
         return -ENOMEM;
 
-    init_completion(&data->result.completion);
+    crypto_init_wait(&data->wait);
     sg_init_one(data->sg, &data->hash, sizeof(data->hash));
     skcipher_request_set_tfm(&data->req, ctr);
     skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
                           CRYPTO_TFM_REQ_MAY_BACKLOG,
-                    crypto_gcm_setkey_done,
-                    &data->result);
+                    crypto_req_done,
+                    &data->wait);
     skcipher_request_set_crypt(&data->req, data->sg, data->sg,
                    sizeof(data->hash), data->iv);
 
-    err = crypto_skcipher_encrypt(&data->req);
-    if (err == -EINPROGRESS || err == -EBUSY) {
-        wait_for_completion(&data->result.completion);
-        err = data->result.err;
-    }
+    err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+                            &data->wait);
 
     if (err)
         goto out;
@@ -197,8 +178,8 @@ static void crypto_gcm_init_common(struct aead_request *req)
     struct scatterlist *sg;
 
     memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
-    memcpy(pctx->iv, req->iv, 12);
-    memcpy(pctx->iv + 12, &counter, 4);
+    memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE);
+    memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4);
 
     sg_init_table(pctx->src, 3);
     sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
@@ -695,7 +676,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
     inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
                        ctr->base.cra_alignmask;
     inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
-    inst->alg.ivsize = 12;
+    inst->alg.ivsize = GCM_AES_IV_SIZE;
     inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
     inst->alg.maxauthsize = 16;
     inst->alg.init = crypto_gcm_init_tfm;
@@ -832,20 +813,20 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
     u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
                crypto_aead_alignmask(child) + 1);
 
-    scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+    scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0);
 
     memcpy(iv, ctx->nonce, 4);
     memcpy(iv + 4, req->iv, 8);
 
     sg_init_table(rctx->src, 3);
-    sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+    sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
     sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
     if (sg != rctx->src + 1)
         sg_chain(rctx->src, 2, sg);
 
     if (req->src != req->dst) {
         sg_init_table(rctx->dst, 3);
-        sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+        sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
         sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
         if (sg != rctx->dst + 1)
             sg_chain(rctx->dst, 2, sg);
@@ -957,7 +938,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
     err = -EINVAL;
 
     /* Underlying IV size must be 12. */
-    if (crypto_aead_alg_ivsize(alg) != 12)
+    if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
         goto out_drop_alg;
 
     /* Not a stream cipher? */
@@ -980,7 +961,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
 
     inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
 
-    inst->alg.ivsize = 8;
+    inst->alg.ivsize = GCM_RFC4106_IV_SIZE;
     inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
     inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 
@@ -1134,7 +1115,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
         tfm,
         sizeof(struct crypto_rfc4543_req_ctx) +
         ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-        align + 12);
+        align + GCM_AES_IV_SIZE);
 
     return 0;
 
@@ -1199,7 +1180,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
     err = -EINVAL;
 
     /* Underlying IV size must be 12. */
-    if (crypto_aead_alg_ivsize(alg) != 12)
+    if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
         goto out_drop_alg;
 
     /* Not a stream cipher? */
@@ -1222,7 +1203,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
 
     inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
 
-    inst->alg.ivsize = 8;
+    inst->alg.ivsize = GCM_RFC4543_IV_SIZE;
     inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
     inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 
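For reference, the replacements above imply the following values for the new constants in <crypto/gcm.h> (assumed from the literals they replace: 12 bytes for the AES-GCM IV, 8 for the rfc4106/rfc4543 transported part):

    /* assumed definitions, matching the magic numbers they replace */
    #define GCM_AES_IV_SIZE     12
    #define GCM_RFC4106_IV_SIZE 8
    #define GCM_RFC4543_IV_SIZE 8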
@@ -156,6 +156,19 @@ static void gf128mul_x8_bbe(be128 *x)
     x->b = cpu_to_be64((b << 8) ^ _tt);
 }
 
+void gf128mul_x8_ble(le128 *r, const le128 *x)
+{
+    u64 a = le64_to_cpu(x->a);
+    u64 b = le64_to_cpu(x->b);
+
+    /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
+    u64 _tt = gf128mul_table_be[a >> 56];
+
+    r->a = cpu_to_le64((a << 8) | (b >> 56));
+    r->b = cpu_to_le64((b << 8) ^ _tt);
+}
+EXPORT_SYMBOL(gf128mul_x8_ble);
+
 void gf128mul_lle(be128 *r, const be128 *b)
 {
     be128 p[8];
@@ -93,18 +93,10 @@ struct crypto_kw_ctx {
 
 struct crypto_kw_block {
 #define SEMIBSIZE 8
-    u8 A[SEMIBSIZE];
-    u8 R[SEMIBSIZE];
+    __be64 A;
+    __be64 R;
 };
 
-/* convert 64 bit integer into its string representation */
-static inline void crypto_kw_cpu_to_be64(u64 val, u8 *buf)
-{
-    __be64 *a = (__be64 *)buf;
-
-    *a = cpu_to_be64(val);
-}
-
 /*
  * Fast forward the SGL to the "end" length minus SEMIBSIZE.
  * The start in the SGL defined by the fast-forward is returned with
@@ -139,17 +131,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
     struct crypto_blkcipher *tfm = desc->tfm;
     struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
     struct crypto_cipher *child = ctx->child;
-
-    unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
-                    crypto_cipher_alignmask(child));
-    unsigned int i;
-
-    u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
-    struct crypto_kw_block *block = (struct crypto_kw_block *)
-                    PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
-    u64 t = 6 * ((nbytes) >> 3);
+    struct crypto_kw_block block;
     struct scatterlist *lsrc, *ldst;
+    u64 t = 6 * ((nbytes) >> 3);
+    unsigned int i;
     int ret = 0;
 
     /*
@@ -160,7 +145,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
         return -EINVAL;
 
     /* Place the IV into block A */
-    memcpy(block->A, desc->info, SEMIBSIZE);
+    memcpy(&block.A, desc->info, SEMIBSIZE);
 
     /*
      * src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -171,32 +156,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
     ldst = dst;
 
     for (i = 0; i < 6; i++) {
-        u8 tbe_buffer[SEMIBSIZE + alignmask];
-        /* alignment for the crypto_xor and the _to_be64 operation */
-        u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
-        unsigned int tmp_nbytes = nbytes;
         struct scatter_walk src_walk, dst_walk;
+        unsigned int tmp_nbytes = nbytes;
 
         while (tmp_nbytes) {
             /* move pointer by tmp_nbytes in the SGL */
             crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
             /* get the source block */
-            scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+            scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
                            false);
 
-            /* perform KW operation: get counter as byte string */
-            crypto_kw_cpu_to_be64(t, tbe);
             /* perform KW operation: modify IV with counter */
-            crypto_xor(block->A, tbe, SEMIBSIZE);
+            block.A ^= cpu_to_be64(t);
             t--;
             /* perform KW operation: decrypt block */
-            crypto_cipher_decrypt_one(child, (u8*)block,
-                          (u8*)block);
+            crypto_cipher_decrypt_one(child, (u8*)&block,
+                          (u8*)&block);
 
             /* move pointer by tmp_nbytes in the SGL */
             crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
             /* Copy block->R into place */
-            scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+            scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
                            true);
 
             tmp_nbytes -= SEMIBSIZE;
@@ -208,11 +188,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
     }
 
     /* Perform authentication check */
-    if (crypto_memneq("\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", block->A,
-              SEMIBSIZE))
+    if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6))
         ret = -EBADMSG;
 
-    memzero_explicit(block, sizeof(struct crypto_kw_block));
+    memzero_explicit(&block, sizeof(struct crypto_kw_block));
 
     return ret;
 }
@@ -224,17 +203,10 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
     struct crypto_blkcipher *tfm = desc->tfm;
     struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
     struct crypto_cipher *child = ctx->child;
-
-    unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
-                    crypto_cipher_alignmask(child));
-    unsigned int i;
-
-    u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
-    struct crypto_kw_block *block = (struct crypto_kw_block *)
-                    PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
-    u64 t = 1;
+    struct crypto_kw_block block;
     struct scatterlist *lsrc, *ldst;
+    u64 t = 1;
     unsigned int i;
 
     /*
     * Require at least 2 semiblocks (note, the 3rd semiblock that is
@@ -249,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
      * Place the predefined IV into block A -- for encrypt, the caller
      * does not need to provide an IV, but he needs to fetch the final IV.
      */
-    memcpy(block->A, "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", SEMIBSIZE);
+    block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6);
 
     /*
      * src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -260,30 +232,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
     ldst = dst;
 
     for (i = 0; i < 6; i++) {
-        u8 tbe_buffer[SEMIBSIZE + alignmask];
-        u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
-        unsigned int tmp_nbytes = nbytes;
         struct scatter_walk src_walk, dst_walk;
+        unsigned int tmp_nbytes = nbytes;
 
         scatterwalk_start(&src_walk, lsrc);
         scatterwalk_start(&dst_walk, ldst);
 
         while (tmp_nbytes) {
             /* get the source block */
-            scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+            scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
                            false);
 
             /* perform KW operation: encrypt block */
-            crypto_cipher_encrypt_one(child, (u8 *)block,
-                          (u8 *)block);
-            /* perform KW operation: get counter as byte string */
-            crypto_kw_cpu_to_be64(t, tbe);
+            crypto_cipher_encrypt_one(child, (u8 *)&block,
+                          (u8 *)&block);
             /* perform KW operation: modify IV with counter */
-            crypto_xor(block->A, tbe, SEMIBSIZE);
+            block.A ^= cpu_to_be64(t);
             t++;
 
             /* Copy block->R into place */
-            scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+            scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
                            true);
 
             tmp_nbytes -= SEMIBSIZE;
@@ -295,9 +263,9 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
     }
 
     /* establish the IV for the caller to pick up */
-    memcpy(desc->info, block->A, SEMIBSIZE);
+    memcpy(desc->info, &block.A, SEMIBSIZE);
 
-    memzero_explicit(block, sizeof(struct crypto_kw_block));
+    memzero_explicit(&block, sizeof(struct crypto_kw_block));
 
     return 0;
 }
crypto/lrw.c | 17
@@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
               crypto_skcipher_encrypt(subreq) ?:
               post_crypt(req);
 
-        if (err == -EINPROGRESS ||
-            (err == -EBUSY &&
-             req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+        if (err == -EINPROGRESS || err == -EBUSY)
             return err;
     }
 
@@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
               crypto_skcipher_decrypt(subreq) ?:
               post_crypt(req);
 
-        if (err == -EINPROGRESS ||
-            (err == -EBUSY &&
-             req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+        if (err == -EINPROGRESS || err == -EBUSY)
             return err;
     }
 
@@ -610,9 +606,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
         ecb_name[len - 1] = 0;
 
         if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-                 "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME)
-            return -ENAMETOOLONG;
-    }
+                 "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
+            err = -ENAMETOOLONG;
+            goto err_drop_spawn;
+        }
+    } else
+        goto err_drop_spawn;
 
     inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
     inst->alg.base.cra_priority = alg->base.cra_priority;
@@ -213,8 +213,6 @@ static void rmd128_transform(u32 *state, const __le32 *in)
     state[2] = state[3] + aa + bbb;
     state[3] = state[0] + bb + ccc;
     state[0] = ddd;
-
-    return;
 }
 
 static int rmd128_init(struct shash_desc *desc)
@@ -256,8 +256,6 @@ static void rmd160_transform(u32 *state, const __le32 *in)
     state[3] = state[4] + aa + bbb;
     state[4] = state[0] + bb + ccc;
     state[0] = ddd;
-
-    return;
 }
 
 static int rmd160_init(struct shash_desc *desc)
@@ -228,8 +228,6 @@ static void rmd256_transform(u32 *state, const __le32 *in)
     state[5] += bbb;
     state[6] += ccc;
     state[7] += ddd;
-
-    return;
 }
 
 static int rmd256_init(struct shash_desc *desc)
@@ -275,8 +275,6 @@ static void rmd320_transform(u32 *state, const __le32 *in)
     state[7] += ccc;
     state[8] += ddd;
     state[9] += eee;
-
-    return;
 }
 
 static int rmd320_init(struct shash_desc *desc)
@@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
                    req->dst, ctx->key_size - 1, req->dst_len);
 
     err = crypto_akcipher_encrypt(&req_ctx->child_req);
-    if (err != -EINPROGRESS &&
-        (err != -EBUSY ||
-         !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err != -EINPROGRESS && err != -EBUSY)
         return pkcs1pad_encrypt_sign_complete(req, err);
 
     return err;
@@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
                    ctx->key_size);
 
     err = crypto_akcipher_decrypt(&req_ctx->child_req);
-    if (err != -EINPROGRESS &&
-        (err != -EBUSY ||
-         !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err != -EINPROGRESS && err != -EBUSY)
         return pkcs1pad_decrypt_complete(req, err);
 
     return err;
@@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
                    req->dst, ctx->key_size - 1, req->dst_len);
 
     err = crypto_akcipher_sign(&req_ctx->child_req);
-    if (err != -EINPROGRESS &&
-        (err != -EBUSY ||
-         !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err != -EINPROGRESS && err != -EBUSY)
         return pkcs1pad_encrypt_sign_complete(req, err);
 
     return err;
@@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
                    ctx->key_size);
 
     err = crypto_akcipher_verify(&req_ctx->child_req);
-    if (err != -EINPROGRESS &&
-        (err != -EBUSY ||
-         !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err != -EINPROGRESS && err != -EBUSY)
         return pkcs1pad_verify_complete(req, err);
 
     return err;
crypto/sm3_generic.c | 210 (new file)
@@ -0,0 +1,210 @@
+/*
+ * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and
+ * described at https://tools.ietf.org/html/draft-shen-sm3-hash-01
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Written by Gilad Ben-Yossef <gilad@benyossef.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <crypto/sm3.h>
+#include <crypto/sm3_base.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
+    0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
+    0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
+    0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
+    0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
+};
+EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
+
+static inline u32 p0(u32 x)
+{
+    return x ^ rol32(x, 9) ^ rol32(x, 17);
+}
+
+static inline u32 p1(u32 x)
+{
+    return x ^ rol32(x, 15) ^ rol32(x, 23);
+}
+
+static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c)
+{
+    return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c));
+}
+
+static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g)
+{
+    return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g));
+}
+
+static inline u32 t(unsigned int n)
+{
+    return (n < 16) ? SM3_T1 : SM3_T2;
+}
+
+static void sm3_expand(u32 *t, u32 *w, u32 *wt)
+{
+    int i;
+    unsigned int tmp;
+
+    /* load the input */
+    for (i = 0; i <= 15; i++)
+        w[i] = get_unaligned_be32((__u32 *)t + i);
+
+    for (i = 16; i <= 67; i++) {
+        tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15);
+        w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6];
+    }
+
+    for (i = 0; i <= 63; i++)
+        wt[i] = w[i] ^ w[i + 4];
+}
+
+static void sm3_compress(u32 *w, u32 *wt, u32 *m)
+{
+    u32 ss1;
+    u32 ss2;
+    u32 tt1;
+    u32 tt2;
+    u32 a, b, c, d, e, f, g, h;
+    int i;
+
+    a = m[0];
+    b = m[1];
+    c = m[2];
+    d = m[3];
+    e = m[4];
+    f = m[5];
+    g = m[6];
+    h = m[7];
+
+    for (i = 0; i <= 63; i++) {
+
+        ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+
+        ss2 = ss1 ^ rol32(a, 12);
+
+        tt1 = ff(i, a, b, c) + d + ss2 + *wt;
+        wt++;
+
+        tt2 = gg(i, e, f, g) + h + ss1 + *w;
+        w++;
+
+        d = c;
+        c = rol32(b, 9);
+        b = a;
+        a = tt1;
+        h = g;
+        g = rol32(f, 19);
+        f = e;
+        e = p0(tt2);
+    }
+
+    m[0] = a ^ m[0];
+    m[1] = b ^ m[1];
+    m[2] = c ^ m[2];
+    m[3] = d ^ m[3];
+    m[4] = e ^ m[4];
+    m[5] = f ^ m[5];
+    m[6] = g ^ m[6];
+    m[7] = h ^ m[7];
+
+    a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0;
+}
+
+static void sm3_transform(struct sm3_state *sst, u8 const *src)
+{
+    unsigned int w[68];
+    unsigned int wt[64];
+
+    sm3_expand((u32 *)src, w, wt);
+    sm3_compress(w, wt, sst->state);
+
+    memzero_explicit(w, sizeof(w));
+    memzero_explicit(wt, sizeof(wt));
+}
+
+static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src,
+                 int blocks)
+{
+    while (blocks--) {
+        sm3_transform(sst, src);
+        src += SM3_BLOCK_SIZE;
+    }
+}
+
+int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
+              unsigned int len)
+{
+    return sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+}
+EXPORT_SYMBOL(crypto_sm3_update);
+
+static int sm3_final(struct shash_desc *desc, u8 *out)
+{
+    sm3_base_do_finalize(desc, sm3_generic_block_fn);
+    return sm3_base_finish(desc, out);
+}
+
+int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
+             unsigned int len, u8 *hash)
+{
+    sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+    return sm3_final(desc, hash);
+}
+EXPORT_SYMBOL(crypto_sm3_finup);
+
+static struct shash_alg sm3_alg = {
+    .digestsize    = SM3_DIGEST_SIZE,
+    .init          = sm3_base_init,
+    .update        = crypto_sm3_update,
+    .final         = sm3_final,
+    .finup         = crypto_sm3_finup,
+    .descsize      = sizeof(struct sm3_state),
+    .base          = {
+        .cra_name        = "sm3",
+        .cra_driver_name = "sm3-generic",
+        .cra_flags       = CRYPTO_ALG_TYPE_SHASH,
+        .cra_blocksize   = SM3_BLOCK_SIZE,
+        .cra_module      = THIS_MODULE,
+    }
+};
+
+static int __init sm3_generic_mod_init(void)
+{
+    return crypto_register_shash(&sm3_alg);
+}
+
+static void __exit sm3_generic_mod_fini(void)
+{
+    crypto_unregister_shash(&sm3_alg);
+}
+
+module_init(sm3_generic_mod_init);
+module_exit(sm3_generic_mod_fini);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
+
+MODULE_ALIAS_CRYPTO("sm3");
+MODULE_ALIAS_CRYPTO("sm3-generic");
crypto/tcrypt.c | 209
@ -70,7 +70,7 @@ static int mode;
|
||||
static char *tvmem[TVMEMSIZE];
|
||||
|
||||
static char *check[] = {
|
||||
"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
|
||||
"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
|
||||
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
|
||||
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
|
||||
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
|
||||
@ -79,34 +79,11 @@ static char *check[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
struct tcrypt_result {
|
||||
struct completion completion;
|
||||
int err;
|
||||
};
|
||||
|
||||
static void tcrypt_complete(struct crypto_async_request *req, int err)
|
||||
{
|
||||
struct tcrypt_result *res = req->data;
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
res->err = err;
|
||||
complete(&res->completion);
|
||||
}
|
||||
|
||||
static inline int do_one_aead_op(struct aead_request *req, int ret)
|
||||
{
|
||||
if (ret == -EINPROGRESS || ret == -EBUSY) {
|
||||
struct tcrypt_result *tr = req->base.data;
|
||||
struct crypto_wait *wait = req->base.data;
|
||||
|
||||
ret = wait_for_completion_interruptible(&tr->completion);
|
||||
if (!ret)
|
||||
ret = tr->err;
|
||||
reinit_completion(&tr->completion);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return crypto_wait_req(ret, wait);
|
||||
}
|
||||
|
||||
static int test_aead_jiffies(struct aead_request *req, int enc,
@@ -248,7 +225,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
    char *axbuf[XBUFSIZE];
    unsigned int *b_size;
    unsigned int iv_len;
    struct tcrypt_result result;
    struct crypto_wait wait;

    iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
    if (!iv)
@@ -284,7 +261,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
        goto out_notfm;
    }

    init_completion(&result.completion);
    crypto_init_wait(&wait);
    printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
           get_driver_name(crypto_aead, tfm), e);

@@ -296,7 +273,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
    }

    aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                              tcrypt_complete, &result);
                              crypto_req_done, &wait);

    i = 0;
    do {
@@ -340,7 +317,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
        }

        sg_init_aead(sg, xbuf,
                     *b_size + (enc ? authsize : 0));
                     *b_size + (enc ? 0 : authsize));

        sg_init_aead(sgout, xoutbuf,
                     *b_size + (enc ? authsize : 0));
@@ -348,7 +325,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
        sg_set_buf(&sg[0], assoc, aad_size);
        sg_set_buf(&sgout[0], assoc, aad_size);

        aead_request_set_crypt(req, sg, sgout, *b_size, iv);
        aead_request_set_crypt(req, sg, sgout,
                               *b_size + (enc ? 0 : authsize),
                               iv);
        aead_request_set_ad(req, aad_size);

        if (secs)
@@ -381,7 +360,6 @@ out_noaxbuf:
    testmgr_free_buf(xbuf);
out_noxbuf:
    kfree(iv);
    return;
}

static void test_hash_sg_init(struct scatterlist *sg)
@@ -397,21 +375,16 @@ static void test_hash_sg_init(struct scatterlist *sg)

static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
    if (ret == -EINPROGRESS || ret == -EBUSY) {
        struct tcrypt_result *tr = req->base.data;
        struct crypto_wait *wait = req->base.data;

        wait_for_completion(&tr->completion);
        reinit_completion(&tr->completion);
        ret = tr->err;
    }
    return ret;
    return crypto_wait_req(ret, wait);
}

struct test_mb_ahash_data {
    struct scatterlist sg[TVMEMSIZE];
    char result[64];
    struct ahash_request *req;
    struct tcrypt_result tresult;
    struct crypto_wait wait;
    char *xbuf[XBUFSIZE];
};

@@ -440,7 +413,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
        if (testmgr_alloc_buf(data[i].xbuf))
            goto out;

        init_completion(&data[i].tresult.completion);
        crypto_init_wait(&data[i].wait);

        data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!data[i].req) {
@@ -449,8 +422,8 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
            goto out;
        }

        ahash_request_set_callback(data[i].req, 0,
                                   tcrypt_complete, &data[i].tresult);
        ahash_request_set_callback(data[i].req, 0, crypto_req_done,
                                   &data[i].wait);
        test_hash_sg_init(data[i].sg);
    }

@@ -492,16 +465,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
            if (ret)
                break;

            complete(&data[k].tresult.completion);
            data[k].tresult.err = 0;
            crypto_req_done(&data[k].req->base, 0);
        }

        for (j = 0; j < k; j++) {
            struct tcrypt_result *tr = &data[j].tresult;
            struct crypto_wait *wait = &data[j].wait;
            int wait_ret;

            wait_for_completion(&tr->completion);
            if (tr->err)
                ret = tr->err;
            wait_ret = crypto_wait_req(-EINPROGRESS, wait);
            if (wait_ret)
                ret = wait_ret;
        }

        end = get_cycles();
@@ -679,7 +652,7 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
                                    struct hash_speed *speed, unsigned mask)
{
    struct scatterlist sg[TVMEMSIZE];
    struct tcrypt_result tresult;
    struct crypto_wait wait;
    struct ahash_request *req;
    struct crypto_ahash *tfm;
    char *output;
@@ -708,9 +681,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
        goto out;
    }

    init_completion(&tresult.completion);
    crypto_init_wait(&wait);
    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                               tcrypt_complete, &tresult);
                               crypto_req_done, &wait);

    output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
    if (!output)
@@ -765,15 +738,9 @@ static void test_hash_speed(const char *algo, unsigned int secs,

static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
    if (ret == -EINPROGRESS || ret == -EBUSY) {
        struct tcrypt_result *tr = req->base.data;
        struct crypto_wait *wait = req->base.data;

        wait_for_completion(&tr->completion);
        reinit_completion(&tr->completion);
        ret = tr->err;
    }

    return ret;
    return crypto_wait_req(ret, wait);
}

static int test_acipher_jiffies(struct skcipher_request *req, int enc,
@@ -853,7 +820,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
                                unsigned int tcount, u8 *keysize, bool async)
{
    unsigned int ret, i, j, k, iv_len;
    struct tcrypt_result tresult;
    struct crypto_wait wait;
    const char *key;
    char iv[128];
    struct skcipher_request *req;
@@ -866,7 +833,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
    else
        e = "decryption";

    init_completion(&tresult.completion);
    crypto_init_wait(&wait);

    tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);

@@ -887,7 +854,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
    }

    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  tcrypt_complete, &tresult);
                                  crypto_req_done, &wait);

    i = 0;
    do {
@@ -1269,6 +1236,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
        ret += tcrypt_test("sha3-512");
        break;

    case 52:
        ret += tcrypt_test("sm3");
        break;

    case 100:
        ret += tcrypt_test("hmac(md5)");
        break;
@@ -1603,115 +1574,116 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
                          speed_template_32);
        break;


    case 300:
        if (alg) {
            test_hash_speed(alg, sec, generic_hash_speed_template);
            break;
        }

        /* fall through */

    case 301:
        test_hash_speed("md4", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 302:
        test_hash_speed("md5", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 303:
        test_hash_speed("sha1", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 304:
        test_hash_speed("sha256", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 305:
        test_hash_speed("sha384", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 306:
        test_hash_speed("sha512", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 307:
        test_hash_speed("wp256", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 308:
        test_hash_speed("wp384", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 309:
        test_hash_speed("wp512", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 310:
        test_hash_speed("tgr128", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 311:
        test_hash_speed("tgr160", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 312:
        test_hash_speed("tgr192", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 313:
        test_hash_speed("sha224", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 314:
        test_hash_speed("rmd128", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 315:
        test_hash_speed("rmd160", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 316:
        test_hash_speed("rmd256", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 317:
        test_hash_speed("rmd320", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 318:
        test_hash_speed("ghash-generic", sec, hash_speed_template_16);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 319:
        test_hash_speed("crc32c", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 320:
        test_hash_speed("crct10dif", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 321:
        test_hash_speed("poly1305", sec, poly1305_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 322:
        test_hash_speed("sha3-224", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 323:
        test_hash_speed("sha3-256", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 324:
        test_hash_speed("sha3-384", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 325:
        test_hash_speed("sha3-512", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;

        /* fall through */
    case 326:
        test_hash_speed("sm3", sec, generic_hash_speed_template);
        if (mode > 300 && mode < 400) break;
        /* fall through */
    case 399:
        break;

@@ -1720,106 +1692,107 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
        test_ahash_speed(alg, sec, generic_hash_speed_template);
        break;
    }

    /* fall through */

    case 401:
        test_ahash_speed("md4", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 402:
        test_ahash_speed("md5", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 403:
        test_ahash_speed("sha1", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 404:
        test_ahash_speed("sha256", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 405:
        test_ahash_speed("sha384", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 406:
        test_ahash_speed("sha512", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 407:
        test_ahash_speed("wp256", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 408:
        test_ahash_speed("wp384", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 409:
        test_ahash_speed("wp512", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 410:
        test_ahash_speed("tgr128", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 411:
        test_ahash_speed("tgr160", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 412:
        test_ahash_speed("tgr192", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 413:
        test_ahash_speed("sha224", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 414:
        test_ahash_speed("rmd128", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 415:
        test_ahash_speed("rmd160", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 416:
        test_ahash_speed("rmd256", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 417:
        test_ahash_speed("rmd320", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 418:
        test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 419:
        test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 420:
        test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;


        /* fall through */
    case 421:
        test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 422:
        test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 423:
        test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 424:
        test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;

        /* fall through */
    case 425:
        test_mb_ahash_speed("sm3", sec, generic_hash_speed_template);
        if (mode > 400 && mode < 500) break;
        /* fall through */
    case 499:
        break;

crypto/testmgr.c
@@ -76,11 +76,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
#define ENCRYPT 1
#define DECRYPT 0

struct tcrypt_result {
    struct completion completion;
    int err;
};

struct aead_test_suite {
    struct {
        const struct aead_testvec *vecs;
@@ -155,17 +150,6 @@ static void hexdump(unsigned char *buf, unsigned int len)
            buf, len, false);
}

static void tcrypt_complete(struct crypto_async_request *req, int err)
{
    struct tcrypt_result *res = req->data;

    if (err == -EINPROGRESS)
        return;

    res->err = err;
    complete(&res->completion);
}

static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
    int i;
@@ -193,20 +177,10 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
        free_page((unsigned long)buf[i]);
}

static int wait_async_op(struct tcrypt_result *tr, int ret)
{
    if (ret == -EINPROGRESS || ret == -EBUSY) {
        wait_for_completion(&tr->completion);
        reinit_completion(&tr->completion);
        ret = tr->err;
    }
    return ret;
}

static int ahash_partial_update(struct ahash_request **preq,
    struct crypto_ahash *tfm, const struct hash_testvec *template,
    void *hash_buff, int k, int temp, struct scatterlist *sg,
    const char *algo, char *result, struct tcrypt_result *tresult)
    const char *algo, char *result, struct crypto_wait *wait)
{
    char *state;
    struct ahash_request *req;
@@ -236,7 +210,7 @@ static int ahash_partial_update(struct ahash_request **preq,
    }
    ahash_request_set_callback(req,
                               CRYPTO_TFM_REQ_MAY_BACKLOG,
                               tcrypt_complete, tresult);
                               crypto_req_done, wait);

    memcpy(hash_buff, template->plaintext + temp,
           template->tap[k]);
@@ -247,7 +221,7 @@ static int ahash_partial_update(struct ahash_request **preq,
        pr_err("alg: hash: Failed to import() for %s\n", algo);
        goto out;
    }
    ret = wait_async_op(tresult, crypto_ahash_update(req));
    ret = crypto_wait_req(crypto_ahash_update(req), wait);
    if (ret)
        goto out;
    *preq = req;
@@ -272,7 +246,7 @@ static int __test_hash(struct crypto_ahash *tfm,
    char *result;
    char *key;
    struct ahash_request *req;
    struct tcrypt_result tresult;
    struct crypto_wait wait;
    void *hash_buff;
    char *xbuf[XBUFSIZE];
    int ret = -ENOMEM;
@@ -286,7 +260,7 @@ static int __test_hash(struct crypto_ahash *tfm,
    if (testmgr_alloc_buf(xbuf))
        goto out_nobuf;

    init_completion(&tresult.completion);
    crypto_init_wait(&wait);

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
@@ -295,7 +269,7 @@ static int __test_hash(struct crypto_ahash *tfm,
        goto out_noreq;
    }
    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                               tcrypt_complete, &tresult);
                               crypto_req_done, &wait);

    j = 0;
    for (i = 0; i < tcount; i++) {
@@ -335,26 +309,26 @@ static int __test_hash(struct crypto_ahash *tfm,

        ahash_request_set_crypt(req, sg, result, template[i].psize);
        if (use_digest) {
            ret = wait_async_op(&tresult, crypto_ahash_digest(req));
            ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
            if (ret) {
                pr_err("alg: hash: digest failed on test %d "
                       "for %s: ret=%d\n", j, algo, -ret);
                goto out;
            }
        } else {
            ret = wait_async_op(&tresult, crypto_ahash_init(req));
            ret = crypto_wait_req(crypto_ahash_init(req), &wait);
            if (ret) {
                pr_err("alg: hash: init failed on test %d "
                       "for %s: ret=%d\n", j, algo, -ret);
                goto out;
            }
            ret = wait_async_op(&tresult, crypto_ahash_update(req));
            ret = crypto_wait_req(crypto_ahash_update(req), &wait);
            if (ret) {
                pr_err("alg: hash: update failed on test %d "
                       "for %s: ret=%d\n", j, algo, -ret);
                goto out;
            }
            ret = wait_async_op(&tresult, crypto_ahash_final(req));
            ret = crypto_wait_req(crypto_ahash_final(req), &wait);
            if (ret) {
                pr_err("alg: hash: final failed on test %d "
                       "for %s: ret=%d\n", j, algo, -ret);
@@ -420,22 +394,10 @@ static int __test_hash(struct crypto_ahash *tfm,
        }

        ahash_request_set_crypt(req, sg, result, template[i].psize);
        ret = crypto_ahash_digest(req);
        switch (ret) {
        case 0:
            break;
        case -EINPROGRESS:
        case -EBUSY:
            wait_for_completion(&tresult.completion);
            reinit_completion(&tresult.completion);
            ret = tresult.err;
            if (!ret)
                break;
            /* fall through */
        default:
            printk(KERN_ERR "alg: hash: digest failed "
                   "on chunking test %d for %s: "
                   "ret=%d\n", j, algo, -ret);
        ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
        if (ret) {
            pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n",
                   j, algo, -ret);
            goto out;
        }

@@ -486,13 +448,13 @@ static int __test_hash(struct crypto_ahash *tfm,
        }

        ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
        ret = wait_async_op(&tresult, crypto_ahash_init(req));
        ret = crypto_wait_req(crypto_ahash_init(req), &wait);
        if (ret) {
            pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
                   j, algo, -ret);
            goto out;
        }
        ret = wait_async_op(&tresult, crypto_ahash_update(req));
        ret = crypto_wait_req(crypto_ahash_update(req), &wait);
        if (ret) {
            pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
                   j, algo, -ret);
@@ -503,7 +465,7 @@ static int __test_hash(struct crypto_ahash *tfm,
        for (k = 1; k < template[i].np; k++) {
            ret = ahash_partial_update(&req, tfm, &template[i],
                                       hash_buff, k, temp, &sg[0], algo, result,
                                       &tresult);
                                       &wait);
            if (ret) {
                pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
                       j, algo, -ret);
@@ -511,7 +473,7 @@ static int __test_hash(struct crypto_ahash *tfm,
            }
            temp += template[i].tap[k];
        }
        ret = wait_async_op(&tresult, crypto_ahash_final(req));
        ret = crypto_wait_req(crypto_ahash_final(req), &wait);
        if (ret) {
            pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
                   j, algo, -ret);
@@ -580,7 +542,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
    struct scatterlist *sg;
    struct scatterlist *sgout;
    const char *e, *d;
    struct tcrypt_result result;
    struct crypto_wait wait;
    unsigned int authsize, iv_len;
    void *input;
    void *output;
@@ -619,7 +581,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
    else
        e = "decryption";

    init_completion(&result.completion);
    crypto_init_wait(&wait);

    req = aead_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
@@ -629,7 +591,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
    }

    aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                              tcrypt_complete, &result);
                              crypto_req_done, &wait);

    iv_len = crypto_aead_ivsize(tfm);

@@ -709,7 +671,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,

        aead_request_set_ad(req, template[i].alen);

        ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
        ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
                              : crypto_aead_decrypt(req), &wait);

        switch (ret) {
        case 0:
@@ -722,13 +685,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                goto out;
            }
            break;
        case -EINPROGRESS:
        case -EBUSY:
            wait_for_completion(&result.completion);
            reinit_completion(&result.completion);
            ret = result.err;
            if (!ret)
                break;
        case -EBADMSG:
            if (template[i].novrfy)
                /* verification failure was expected */
@@ -866,7 +822,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,

        aead_request_set_ad(req, template[i].alen);

        ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
        ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
                              : crypto_aead_decrypt(req), &wait);

        switch (ret) {
        case 0:
@@ -879,13 +836,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                goto out;
            }
            break;
        case -EINPROGRESS:
        case -EBUSY:
            wait_for_completion(&result.completion);
            reinit_completion(&result.completion);
            ret = result.err;
            if (!ret)
                break;
        case -EBADMSG:
            if (template[i].novrfy)
                /* verification failure was expected */
@@ -1083,7 +1033,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
    struct scatterlist sg[8];
    struct scatterlist sgout[8];
    const char *e, *d;
    struct tcrypt_result result;
    struct crypto_wait wait;
    void *data;
    char iv[MAX_IVLEN];
    char *xbuf[XBUFSIZE];
@@ -1107,7 +1057,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
    else
        e = "decryption";

    init_completion(&result.completion);
    crypto_init_wait(&wait);

    req = skcipher_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
@@ -1117,7 +1067,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
    }

    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  tcrypt_complete, &result);
                                  crypto_req_done, &wait);

    j = 0;
    for (i = 0; i < tcount; i++) {
@@ -1164,21 +1114,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,

        skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
                                   template[i].ilen, iv);
        ret = enc ? crypto_skcipher_encrypt(req) :
                    crypto_skcipher_decrypt(req);
        ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
                              crypto_skcipher_decrypt(req), &wait);

        switch (ret) {
        case 0:
            break;
        case -EINPROGRESS:
        case -EBUSY:
            wait_for_completion(&result.completion);
            reinit_completion(&result.completion);
            ret = result.err;
            if (!ret)
                break;
            /* fall through */
        default:
            if (ret) {
                pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
                       d, e, j, algo, -ret);
                goto out;
@@ -1272,21 +1211,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
        skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
                                   template[i].ilen, iv);

        ret = enc ? crypto_skcipher_encrypt(req) :
                    crypto_skcipher_decrypt(req);
        ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
                              crypto_skcipher_decrypt(req), &wait);

        switch (ret) {
        case 0:
            break;
        case -EINPROGRESS:
        case -EBUSY:
            wait_for_completion(&result.completion);
            reinit_completion(&result.completion);
            ret = result.err;
            if (!ret)
                break;
            /* fall through */
        default:
            if (ret) {
                pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
                       d, e, j, algo, -ret);
                goto out;
@@ -1462,7 +1390,7 @@ static int test_acomp(struct crypto_acomp *tfm,
    int ret;
    struct scatterlist src, dst;
    struct acomp_req *req;
    struct tcrypt_result result;
    struct crypto_wait wait;

    output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
    if (!output)
@@ -1486,7 +1414,7 @@ static int test_acomp(struct crypto_acomp *tfm,
        }

        memset(output, 0, dlen);
        init_completion(&result.completion);
        crypto_init_wait(&wait);
        sg_init_one(&src, input_vec, ilen);
        sg_init_one(&dst, output, dlen);

@@ -1501,9 +1429,9 @@ static int test_acomp(struct crypto_acomp *tfm,

        acomp_request_set_params(req, &src, &dst, ilen, dlen);
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   tcrypt_complete, &result);
                                   crypto_req_done, &wait);

        ret = wait_async_op(&result, crypto_acomp_compress(req));
        ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
        if (ret) {
            pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
                   i + 1, algo, -ret);
@@ -1516,10 +1444,10 @@ static int test_acomp(struct crypto_acomp *tfm,
        dlen = COMP_BUF_SIZE;
        sg_init_one(&src, output, ilen);
        sg_init_one(&dst, decomp_out, dlen);
        init_completion(&result.completion);
        crypto_init_wait(&wait);
        acomp_request_set_params(req, &src, &dst, ilen, dlen);

        ret = wait_async_op(&result, crypto_acomp_decompress(req));
        ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
        if (ret) {
            pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
                   i + 1, algo, -ret);
@@ -1563,7 +1491,7 @@ static int test_acomp(struct crypto_acomp *tfm,
        }

        memset(output, 0, dlen);
        init_completion(&result.completion);
        crypto_init_wait(&wait);
        sg_init_one(&src, input_vec, ilen);
        sg_init_one(&dst, output, dlen);

@@ -1578,9 +1506,9 @@ static int test_acomp(struct crypto_acomp *tfm,

        acomp_request_set_params(req, &src, &dst, ilen, dlen);
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   tcrypt_complete, &result);
                                   crypto_req_done, &wait);

        ret = wait_async_op(&result, crypto_acomp_decompress(req));
        ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
        if (ret) {
            pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
                   i + 1, algo, -ret);
@@ -2000,7 +1928,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
    void *a_public = NULL;
    void *a_ss = NULL;
    void *shared_secret = NULL;
    struct tcrypt_result result;
    struct crypto_wait wait;
    unsigned int out_len_max;
    int err = -ENOMEM;
    struct scatterlist src, dst;
@@ -2009,7 +1937,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
    if (!req)
        return err;

    init_completion(&result.completion);
    crypto_init_wait(&wait);

    err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
    if (err < 0)
@@ -2027,10 +1955,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
    sg_init_one(&dst, output_buf, out_len_max);
    kpp_request_set_output(req, &dst, out_len_max);
    kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                             tcrypt_complete, &result);
                             crypto_req_done, &wait);

    /* Compute party A's public key */
    err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
    err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
    if (err) {
        pr_err("alg: %s: Party A: generate public key test failed. err %d\n",
               alg, err);
@@ -2069,8 +1997,8 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
    kpp_request_set_input(req, &src, vec->b_public_size);
    kpp_request_set_output(req, &dst, out_len_max);
    kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                             tcrypt_complete, &result);
    err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
                             crypto_req_done, &wait);
    err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
    if (err) {
        pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n",
               alg, err);
@@ -2100,9 +2028,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
    kpp_request_set_input(req, &src, vec->expected_a_public_size);
    kpp_request_set_output(req, &dst, out_len_max);
    kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                             tcrypt_complete, &result);
    err = wait_async_op(&result,
                        crypto_kpp_compute_shared_secret(req));
                             crypto_req_done, &wait);
    err = crypto_wait_req(crypto_kpp_compute_shared_secret(req),
                          &wait);
    if (err) {
        pr_err("alg: %s: Party B: compute shared secret failed. err %d\n",
               alg, err);
@@ -2179,7 +2107,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
    struct akcipher_request *req;
    void *outbuf_enc = NULL;
    void *outbuf_dec = NULL;
    struct tcrypt_result result;
    struct crypto_wait wait;
    unsigned int out_len_max, out_len = 0;
    int err = -ENOMEM;
    struct scatterlist src, dst, src_tab[2];
@@ -2191,7 +2119,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
    if (!req)
        goto free_xbuf;

    init_completion(&result.completion);
    crypto_init_wait(&wait);

    if (vecs->public_key_vec)
        err = crypto_akcipher_set_pub_key(tfm, vecs->key,
@@ -2220,13 +2148,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
    akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
                               out_len_max);
    akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  tcrypt_complete, &result);
                                  crypto_req_done, &wait);

    err = wait_async_op(&result, vecs->siggen_sigver_test ?
                        /* Run asymmetric signature generation */
                        crypto_akcipher_sign(req) :
                        /* Run asymmetric encrypt */
                        crypto_akcipher_encrypt(req));
    err = crypto_wait_req(vecs->siggen_sigver_test ?
                          /* Run asymmetric signature generation */
                          crypto_akcipher_sign(req) :
                          /* Run asymmetric encrypt */
                          crypto_akcipher_encrypt(req), &wait);
    if (err) {
        pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
        goto free_all;
@@ -2261,14 +2189,14 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,

    sg_init_one(&src, xbuf[0], vecs->c_size);
    sg_init_one(&dst, outbuf_dec, out_len_max);
    init_completion(&result.completion);
    crypto_init_wait(&wait);
    akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);

    err = wait_async_op(&result, vecs->siggen_sigver_test ?
                        /* Run asymmetric signature verification */
                        crypto_akcipher_verify(req) :
                        /* Run asymmetric decrypt */
                        crypto_akcipher_decrypt(req));
    err = crypto_wait_req(vecs->siggen_sigver_test ?
                          /* Run asymmetric signature verification */
                          crypto_akcipher_verify(req) :
                          /* Run asymmetric decrypt */
                          crypto_akcipher_decrypt(req), &wait);
    if (err) {
        pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
        goto free_all;
@@ -3499,6 +3427,12 @@ static const struct alg_test_desc alg_test_descs[] = {
        .suite = {
            .hash = __VECS(sha512_tv_template)
        }
    }, {
        .alg = "sm3",
        .test = alg_test_hash,
        .suite = {
            .hash = __VECS(sm3_tv_template)
        }
    }, {
        .alg = "tgr128",
        .test = alg_test_hash,
@@ -1497,6 +1497,73 @@ static const struct hash_testvec crct10dif_tv_template[] = {
    }
};

/* Example vectors below taken from
 * http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
 *
 * The rest taken from
 * https://github.com/adamws/oscca-sm3
 */
static const struct hash_testvec sm3_tv_template[] = {
    {
        .plaintext = "",
        .psize = 0,
        .digest = (u8 *)(u8 []) {
            0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
            0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
            0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
            0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B }
    }, {
        .plaintext = "a",
        .psize = 1,
        .digest = (u8 *)(u8 []) {
            0x62, 0x34, 0x76, 0xAC, 0x18, 0xF6, 0x5A, 0x29,
            0x09, 0xE4, 0x3C, 0x7F, 0xEC, 0x61, 0xB4, 0x9C,
            0x7E, 0x76, 0x4A, 0x91, 0xA1, 0x8C, 0xCB, 0x82,
            0xF1, 0x91, 0x7A, 0x29, 0xC8, 0x6C, 0x5E, 0x88 }
    }, {
        /* A.1. Example 1 */
        .plaintext = "abc",
        .psize = 3,
        .digest = (u8 *)(u8 []) {
            0x66, 0xC7, 0xF0, 0xF4, 0x62, 0xEE, 0xED, 0xD9,
            0xD1, 0xF2, 0xD4, 0x6B, 0xDC, 0x10, 0xE4, 0xE2,
            0x41, 0x67, 0xC4, 0x87, 0x5C, 0xF2, 0xF7, 0xA2,
            0x29, 0x7D, 0xA0, 0x2B, 0x8F, 0x4B, 0xA8, 0xE0 }
    }, {
        .plaintext = "abcdefghijklmnopqrstuvwxyz",
        .psize = 26,
        .digest = (u8 *)(u8 []) {
            0xB8, 0x0F, 0xE9, 0x7A, 0x4D, 0xA2, 0x4A, 0xFC,
            0x27, 0x75, 0x64, 0xF6, 0x6A, 0x35, 0x9E, 0xF4,
            0x40, 0x46, 0x2A, 0xD2, 0x8D, 0xCC, 0x6D, 0x63,
            0xAD, 0xB2, 0x4D, 0x5C, 0x20, 0xA6, 0x15, 0x95 }
    }, {
        /* A.1. Example 2 */
        .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab"
                     "cdabcdabcdabcdabcd",
        .psize = 64,
        .digest = (u8 *)(u8 []) {
            0xDE, 0xBE, 0x9F, 0xF9, 0x22, 0x75, 0xB8, 0xA1,
            0x38, 0x60, 0x48, 0x89, 0xC1, 0x8E, 0x5A, 0x4D,
            0x6F, 0xDB, 0x70, 0xE5, 0x38, 0x7E, 0x57, 0x65,
            0x29, 0x3D, 0xCB, 0xA3, 0x9C, 0x0C, 0x57, 0x32 }
    }, {
        .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
                     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
                     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
                     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
                     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
                     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
                     "abcdabcdabcdabcdabcdabcdabcdabcd",
        .psize = 256,
        .digest = (u8 *)(u8 []) {
            0xB9, 0x65, 0x76, 0x4C, 0x8B, 0xEB, 0xB0, 0x91,
            0xC7, 0x60, 0x2B, 0x74, 0xAF, 0xD3, 0x4E, 0xEF,
            0xB5, 0x31, 0xDC, 0xCB, 0x4E, 0x00, 0x76, 0xD9,
            0xB7, 0xCD, 0x81, 0x31, 0x99, 0xB4, 0x59, 0x71 }
    }
};

/*
 * SHA1 test vectors from FIPS PUB 180-1
 * Long vector from CAVS 5.0
@@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
              crypto_skcipher_encrypt(subreq) ?:
              post_crypt(req);

        if (err == -EINPROGRESS ||
            (err == -EBUSY &&
             req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
        if (err == -EINPROGRESS || err == -EBUSY)
            return err;
    }

@@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
              crypto_skcipher_decrypt(subreq) ?:
              post_crypt(req);

        if (err == -EINPROGRESS ||
            (err == -EBUSY &&
             req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
        if (err == -EINPROGRESS || err == -EBUSY)
            return err;
    }

@@ -100,12 +100,12 @@ config HW_RANDOM_BCM2835
      If unsure, say Y.

config HW_RANDOM_IPROC_RNG200
    tristate "Broadcom iProc RNG200 support"
    depends on ARCH_BCM_IPROC
    tristate "Broadcom iProc/STB RNG200 support"
    depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
    default HW_RANDOM
    ---help---
      This driver provides kernel-side support for the RNG200
      hardware found on the Broadcom iProc SoCs.
      hardware found on the Broadcom iProc and STB SoCs.

      To compile this driver as a module, choose M here: the
      module will be called iproc-rng200
@@ -292,26 +292,48 @@ static struct miscdevice rng_miscdev = {
    .groups = rng_dev_groups,
};

static int enable_best_rng(void)
{
    int ret = -ENODEV;

    BUG_ON(!mutex_is_locked(&rng_mutex));

    /* rng_list is sorted by quality, use the best (=first) one */
    if (!list_empty(&rng_list)) {
        struct hwrng *new_rng;

        new_rng = list_entry(rng_list.next, struct hwrng, list);
        ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
        if (!ret)
            cur_rng_set_by_user = 0;
    }

    return ret;
}

static ssize_t hwrng_attr_current_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t len)
{
    int err;
    int err = -ENODEV;
    struct hwrng *rng;

    err = mutex_lock_interruptible(&rng_mutex);
    if (err)
        return -ERESTARTSYS;
    err = -ENODEV;
    list_for_each_entry(rng, &rng_list, list) {
        if (sysfs_streq(rng->name, buf)) {
            err = 0;
            cur_rng_set_by_user = 1;
            if (rng != current_rng)

    if (sysfs_streq(buf, "")) {
        err = enable_best_rng();
    } else {
        list_for_each_entry(rng, &rng_list, list) {
            if (sysfs_streq(rng->name, buf)) {
                cur_rng_set_by_user = 1;
                err = set_current_rng(rng);
                break;
                break;
            }
        }
    }

    mutex_unlock(&rng_mutex);

    return err ? : len;
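With this change, writing an empty string to the rng_current sysfs attribute drops any user override and falls back to the best-quality device. A hypothetical userspace sketch follows; the sysfs path is the usual hw_random location and may differ per system, and a lone newline is written because sysfs_streq() treats it as equivalent to the empty string (a zero-length write() may never reach the store callback):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Assumed path; hw_random exposes rng_current under its misc device. */
        const char *attr = "/sys/class/misc/hw_random/rng_current";
        int fd = open(attr, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* Equivalent of: echo "" > rng_current */
        if (write(fd, "\n", 1) < 0)
            perror("write");
        close(fd);
        return 0;
    }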
@@ -423,7 +445,7 @@ static void start_khwrngd(void)
{
    hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
    if (IS_ERR(hwrng_fill)) {
        pr_err("hwrng_fill thread creation failed");
        pr_err("hwrng_fill thread creation failed\n");
        hwrng_fill = NULL;
    }
}
@@ -493,17 +515,8 @@ void hwrng_unregister(struct hwrng *rng)
    mutex_lock(&rng_mutex);

    list_del(&rng->list);
    if (current_rng == rng) {
        drop_current_rng();
        cur_rng_set_by_user = 0;
        /* rng_list is sorted by quality, use the best (=first) one */
        if (!list_empty(&rng_list)) {
            struct hwrng *new_rng;

            new_rng = list_entry(rng_list.next, struct hwrng, list);
            set_current_rng(new_rng);
        }
    }
    if (current_rng == rng)
        enable_best_rng();

    if (list_empty(&rng_list)) {
        mutex_unlock(&rng_mutex);
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}

static const struct of_device_id iproc_rng200_of_match[] = {
    { .compatible = "brcm,bcm7278-rng200", },
    { .compatible = "brcm,iproc-rng200", },
    {},
};
@@ -72,7 +72,7 @@ static int pseries_rng_remove(struct vio_dev *dev)
    return 0;
}

static struct vio_device_id pseries_rng_driver_ids[] = {
static const struct vio_device_id pseries_rng_driver_ids[] = {
    { "ibm,random-v1", "ibm,random"},
    { "", "" }
};
@@ -52,13 +52,6 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
    int retval = 0;
    int period_us = ktime_to_us(priv->period);

    /*
     * The RNG provides 32-bits per read. Ensure there is enough space for
     * at minimum one read.
     */
    if (max < sizeof(u32))
        return 0;

    /*
     * There may not have been enough time for new data to be generated
     * since the last request. If the caller doesn't want to wait, let them
@@ -184,7 +184,26 @@ static int virtrng_freeze(struct virtio_device *vdev)

static int virtrng_restore(struct virtio_device *vdev)
{
    return probe_common(vdev);
    int err;

    err = probe_common(vdev);
    if (!err) {
        struct virtrng_info *vi = vdev->priv;

        /*
         * Set hwrng_removed to ensure that virtio_read()
         * does not block waiting for data before the
         * registration is complete.
         */
        vi->hwrng_removed = true;
        err = hwrng_register(&vi->hwrng);
        if (!err) {
            vi->hwrng_register_done = true;
            vi->hwrng_removed = false;
        }
    }

    return err;
}
#endif

@@ -199,22 +199,8 @@ config CRYPTO_CRC32_S390

      It is available with IBM z13 or later.

config CRYPTO_DEV_MV_CESA
    tristate "Marvell's Cryptographic Engine"
    depends on PLAT_ORION
    select CRYPTO_AES
    select CRYPTO_BLKCIPHER
    select CRYPTO_HASH
    select SRAM
    help
      This driver allows you to utilize the Cryptographic Engines and
      Security Accelerator (CESA) which can be found on the Marvell Orion
      and Kirkwood SoCs, such as QNAP's TS-209.

      Currently the driver supports AES in ECB and CBC mode without DMA.

config CRYPTO_DEV_MARVELL_CESA
    tristate "New Marvell's Cryptographic Engine driver"
    tristate "Marvell's Cryptographic Engine driver"
    depends on PLAT_ORION || ARCH_MVEBU
    select CRYPTO_AES
    select CRYPTO_DES
@@ -223,12 +209,10 @@ config CRYPTO_DEV_MARVELL_CESA
    select SRAM
    help
      This driver allows you to utilize the Cryptographic Engines and
      Security Accelerator (CESA) which can be found on the Armada 370.
      Security Accelerator (CESA) which can be found on MVEBU and ORION
      platforms.
      This driver supports CPU offload through DMA transfers.

      This driver is aimed at replacing the mv_cesa driver. This will only
      happen once it has received proper testing.

config CRYPTO_DEV_NIAGARA2
    tristate "Niagara2 Stream Processing Unit driver"
    select CRYPTO_DES
@@ -315,6 +299,10 @@ config CRYPTO_DEV_PPC4XX
    tristate "Driver AMCC PPC4xx crypto accelerator"
    depends on PPC && 4xx
    select CRYPTO_HASH
    select CRYPTO_AEAD
    select CRYPTO_AES
    select CRYPTO_CCM
    select CRYPTO_GCM
    select CRYPTO_BLKCIPHER
    help
      This option allows you to have support for AMCC crypto acceleration.
@@ -439,6 +427,20 @@ config CRYPTO_DEV_S5P
      Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
      algorithms execution.

config CRYPTO_DEV_EXYNOS_HASH
    bool "Support for Samsung Exynos HASH accelerator"
    depends on CRYPTO_DEV_S5P
    depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m
    select CRYPTO_SHA1
    select CRYPTO_MD5
    select CRYPTO_SHA256
    help
      Select this to offload Exynos from HASH MD5/SHA1/SHA256.
      This will select software SHA1, MD5 and SHA256 as they are
      needed for small and zero-size messages.
      HASH algorithms will be disabled if EXYNOS_RNG
      is enabled due to hw conflict.

config CRYPTO_DEV_NX
    bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
    depends on PPC64
@@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
@@ -1,3 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o
crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
@@ -26,11 +26,14 @@
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/ctr.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"

static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
                                     u32 save_iv, u32 ld_h, u32 ld_iv,
@@ -62,6 +65,7 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
    sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
    sa->sa_command_1.bf.feedback_mode = cfb,
    sa->sa_command_1.bf.sa_rev = 1;
    sa->sa_command_1.bf.hmac_muting = hmac_mc;
    sa->sa_command_1.bf.extended_seq_num = esn;
    sa->sa_command_1.bf.seq_num_mask = sn_mask;
    sa->sa_command_1.bf.mutable_bit_proc = mute;
@@ -73,29 +77,29 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
int crypto4xx_encrypt(struct ablkcipher_request *req)
{
    struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    unsigned int ivlen = crypto_ablkcipher_ivsize(
                             crypto_ablkcipher_reqtfm(req));
    __le32 iv[ivlen];

    ctx->direction = DIR_OUTBOUND;
    ctx->hash_final = 0;
    ctx->is_hash = 0;
    ctx->pd_ctl = 0x1;
    if (ivlen)
        crypto4xx_memcpy_to_le32(iv, req->info, ivlen);

    return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
                              req->nbytes, req->info,
                              get_dynamic_sa_iv_size(ctx));
                              req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
}

int crypto4xx_decrypt(struct ablkcipher_request *req)
{
    struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    unsigned int ivlen = crypto_ablkcipher_ivsize(
                             crypto_ablkcipher_reqtfm(req));
    __le32 iv[ivlen];

    ctx->direction = DIR_INBOUND;
    ctx->hash_final = 0;
    ctx->is_hash = 0;
    ctx->pd_ctl = 1;
    if (ivlen)
        crypto4xx_memcpy_to_le32(iv, req->info, ivlen);

    return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
                              req->nbytes, req->info,
                              get_dynamic_sa_iv_size(ctx));
                              req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
}

/**
@@ -120,23 +124,15 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
    }

    /* Create SA */
    if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
    if (ctx->sa_in || ctx->sa_out)
        crypto4xx_free_sa(ctx);

    rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
    if (rc)
        return rc;

    if (ctx->state_record_dma_addr == 0) {
        rc = crypto4xx_alloc_state_record(ctx);
        if (rc) {
            crypto4xx_free_sa(ctx);
            return rc;
        }
    }
    /* Setup SA */
    sa = (struct dynamic_sa_ctl *) ctx->sa_in;
    ctx->hash_final = 0;
    sa = ctx->sa_in;

    set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
                             SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
@@ -150,18 +146,13 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
                             SA_SEQ_MASK_OFF, SA_MC_ENABLE,
                             SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
                             SA_NOT_COPY_HDR);
    crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
                        key, keylen);
    sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
    crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
                             key, keylen);
    sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
    sa->sa_command_1.bf.key_len = keylen >> 3;
    ctx->is_hash = 0;
    ctx->direction = DIR_INBOUND;
    memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
           (void *)&ctx->state_record_dma_addr, 4);
    ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);

    memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
    sa = (struct dynamic_sa_ctl *) ctx->sa_out;
    sa = ctx->sa_out;
    sa->sa_command_0.bf.dir = DIR_OUTBOUND;

    return 0;
@@ -174,6 +165,396 @@ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
                             CRYPTO_FEEDBACK_MODE_NO_FB);
}

int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
                             const u8 *key, unsigned int keylen)
{
    return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
                                CRYPTO_FEEDBACK_MODE_128BIT_CFB);
}

int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
                             const u8 *key, unsigned int keylen)
{
    return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
                                CRYPTO_FEEDBACK_MODE_NO_FB);
}

int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
                             const u8 *key, unsigned int keylen)
{
    return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
                                CRYPTO_FEEDBACK_MODE_64BIT_OFB);
}

int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
                             const u8 *key, unsigned int keylen)
{
    struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
    struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
    int rc;

    rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
                              CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
    if (rc)
        return rc;

    ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
                                             CTR_RFC3686_NONCE_SIZE]);

    return 0;
}

int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
{
    struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    __le32 iv[AES_IV_SIZE / 4] = {
        ctx->iv_nonce,
        cpu_to_le32p((u32 *) req->info),
        cpu_to_le32p((u32 *) (req->info + 4)),
        cpu_to_le32(1) };

    return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
                              req->nbytes, iv, AES_IV_SIZE,
                              ctx->sa_out, ctx->sa_len, 0);
}

int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
{
    struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    __le32 iv[AES_IV_SIZE / 4] = {
        ctx->iv_nonce,
        cpu_to_le32p((u32 *) req->info),
        cpu_to_le32p((u32 *) (req->info + 4)),
        cpu_to_le32(1) };

    return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
                              req->nbytes, iv, AES_IV_SIZE,
                              ctx->sa_out, ctx->sa_len, 0);
}

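Both rfc3686 paths above assemble the 16-byte counter block that RFC 3686 prescribes for AES-CTR in IPsec: a 4-byte nonce taken from the tail of the key, the 8-byte per-request IV, and a 32-bit block counter starting at 1 (the driver stores the words in the little-endian layout its hardware expects). A standalone sketch of the wire-format layout, for illustration only:

    #include <stdint.h>
    #include <string.h>

    /*
     * RFC 3686 counter block: nonce (4) || IV (8) || counter (4), where the
     * big-endian counter starts at 1 for the first block. Illustrative helper.
     */
    static void rfc3686_ctr_block(uint8_t out[16],
                                  const uint8_t nonce[4], const uint8_t iv[8])
    {
        memcpy(out, nonce, 4);        /* from the last 4 bytes of the key */
        memcpy(out + 4, iv, 8);       /* per-request IV */
        out[12] = 0;                  /* 32-bit big-endian counter = 1 */
        out[13] = 0;
        out[14] = 0;
        out[15] = 1;
    }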
static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
|
||||
bool is_ccm, bool decrypt)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
|
||||
/* authsize has to be a multiple of 4 */
|
||||
if (aead->authsize & 3)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* hardware does not handle cases where cryptlen
|
||||
* is less than a block
|
||||
*/
|
||||
if (req->cryptlen < AES_BLOCK_SIZE)
|
||||
return true;
|
||||
|
||||
/* assoc len needs to be a multiple of 4 */
|
||||
if (req->assoclen & 0x3)
|
||||
return true;
|
||||
|
||||
/* CCM supports only counter field length of 2 and 4 bytes */
|
||||
if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
|
||||
return true;
|
||||
|
||||
/* CCM - fix CBC MAC mismatch in special case */
|
||||
if (is_ccm && decrypt && !req->assoclen)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int crypto4xx_aead_fallback(struct aead_request *req,
|
||||
struct crypto4xx_ctx *ctx, bool do_decrypt)
|
||||
{
|
||||
char aead_req_data[sizeof(struct aead_request) +
|
||||
crypto_aead_reqsize(ctx->sw_cipher.aead)]
|
||||
__aligned(__alignof__(struct aead_request));
|
||||
|
||||
struct aead_request *subreq = (void *) aead_req_data;
|
||||
|
||||
memset(subreq, 0, sizeof(aead_req_data));
|
||||
|
||||
aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
|
||||
aead_request_set_callback(subreq, req->base.flags,
|
||||
req->base.complete, req->base.data);
|
||||
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
|
||||
req->iv);
|
||||
aead_request_set_ad(subreq, req->assoclen);
|
||||
return do_decrypt ? crypto_aead_decrypt(subreq) :
|
||||
crypto_aead_encrypt(subreq);
|
||||
}
|
||||
|
||||
static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
|
||||
struct crypto_aead *cipher,
|
||||
const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
int rc;
|
||||
|
||||
crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_aead_set_flags(ctx->sw_cipher.aead,
|
||||
crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
|
||||
rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
|
||||
crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
|
||||
crypto_aead_set_flags(cipher,
|
||||
crypto_aead_get_flags(ctx->sw_cipher.aead) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
 * AES-CCM Functions
 */

int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
	struct dynamic_sa_ctl *sa;
	int rc = 0;

	rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
	if (rc)
		return rc;

	if (ctx->sa_in || ctx->sa_out)
		crypto4xx_free_sa(ctx);

	rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
	if (rc)
		return rc;

	/* Setup SA */
	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
	sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);

	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
				 SA_CIPHER_ALG_AES,
				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
				 SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);

	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
				 SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
				 SA_NOT_COPY_HDR);

	sa->sa_command_1.bf.key_len = keylen >> 3;

	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);

	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
	sa = (struct dynamic_sa_ctl *) ctx->sa_out;

	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
				 SA_CIPHER_ALG_AES,
				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
				 SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);

	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
				 SA_COPY_PAD, SA_COPY_PAYLOAD,
				 SA_NOT_COPY_HDR);

	sa->sa_command_1.bf.key_len = keylen >> 3;
	return 0;
}
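/*
 * Editor's worked example (not driver code): the SA length above is
 * counted in 32-bit words, and SA_AES128_CCM_LEN already covers a
 * 16-byte key. For AES-256-CCM the allocation therefore becomes
 *
 *	SA_AES128_CCM_LEN + (32 - 16) / 4  ==  SA_AES128_CCM_LEN + 4
 *
 * i.e. four extra words for the longer key, while
 * sa_command_1.bf.key_len stores the key length in 8-byte units
 * (32 >> 3 == 4).
 */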
static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
{
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int len = req->cryptlen;
	__le32 iv[16];
	u32 tmp_sa[ctx->sa_len * 4];
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;

	if (crypto4xx_aead_need_fallback(req, true, decrypt))
		return crypto4xx_aead_fallback(req, ctx, decrypt);

	if (decrypt)
		len -= crypto_aead_authsize(aead);

	memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa));
	sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;

	if (req->iv[0] == 1) {
		/* CRYPTO_MODE_AES_ICM */
		sa->sa_command_1.bf.crypto_mode9_8 = 1;
	}

	iv[3] = cpu_to_le32(0);
	crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));

	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
				  len, iv, sizeof(iv),
				  sa, ctx->sa_len, req->assoclen);
}

int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
{
	return crypto4xx_crypt_aes_ccm(req, false);
}

int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
{
	return crypto4xx_crypt_aes_ccm(req, true);
}
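/*
 * Editor's worked example (not driver code): per RFC 3610 the first
 * counter-block byte holds L' = L - 1, where L is the length of the
 * counter field. So iv[0] == 1 means L == 2 and iv[0] == 3 means
 * L == 4, the only two cases the fallback check accepts, and the copy
 * above transfers the flags byte plus the 15 - L nonce bytes:
 *
 *	16 - (iv[0] + 1)  ==  1 + (15 - L)   bytes
 *
 * e.g. iv[0] == 1 copies 14 bytes: the flags byte and a 13-byte nonce.
 */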
int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
			       unsigned int authsize)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
}

/**
 * AES-GCM Functions
 */

static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
{
	switch (keylen) {
	case 16:
	case 24:
	case 32:
		return 0;
	default:
		return -EINVAL;
	}
}
static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
					     unsigned int keylen)
{
	struct crypto_cipher *aes_tfm = NULL;
	uint8_t src[16] = { 0 };
	int rc = 0;

	aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(aes_tfm)) {
		rc = PTR_ERR(aes_tfm);
		pr_warn("could not load aes cipher driver: %d\n", rc);
		return rc;
	}

	rc = crypto_cipher_setkey(aes_tfm, key, keylen);
	if (rc) {
		pr_err("setkey() failed: %d\n", rc);
		goto out;
	}

	crypto_cipher_encrypt_one(aes_tfm, src, src);
	crypto4xx_memcpy_to_le32(hash_start, src, 16);
out:
	crypto_free_cipher(aes_tfm);
	return rc;
}
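/*
 * Editor's note (not driver code): GCM defines its hash subkey as
 * H = AES-K(0^128), i.e. the block cipher applied to the all-zero
 * block. That is exactly what the helper above computes with a
 * throwaway software cipher, encrypting the zeroed `src` buffer in
 * place before the result is loaded into the SA's inner digest.
 */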
int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
	struct dynamic_sa_ctl *sa;
	int rc = 0;

	if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
		crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
	if (rc)
		return rc;

	if (ctx->sa_in || ctx->sa_out)
		crypto4xx_free_sa(ctx);

	rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
	if (rc)
		return rc;

	sa = (struct dynamic_sa_ctl *) ctx->sa_in;

	sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
				 SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
				 SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
				 DIR_INBOUND);
	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
				 SA_SEQ_MASK_ON, SA_MC_DISABLE,
				 SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
				 SA_NOT_COPY_HDR);

	sa->sa_command_1.bf.key_len = keylen >> 3;

	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
				 key, keylen);

	rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
		key, keylen);
	if (rc) {
		pr_err("GCM hash key setting failed = %d\n", rc);
		goto err;
	}

	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
	sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;

	return 0;
err:
	crypto4xx_free_sa(ctx);
	return rc;
}
static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
					  bool decrypt)
{
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	unsigned int len = req->cryptlen;
	__le32 iv[4];

	if (crypto4xx_aead_need_fallback(req, false, decrypt))
		return crypto4xx_aead_fallback(req, ctx, decrypt);

	crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
	iv[3] = cpu_to_le32(1);

	if (decrypt)
		len -= crypto_aead_authsize(crypto_aead_reqtfm(req));

	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
				  len, iv, sizeof(iv),
				  decrypt ? ctx->sa_in : ctx->sa_out,
				  ctx->sa_len, req->assoclen);
}

int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
{
	return crypto4xx_crypt_aes_gcm(req, false);
}

int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
{
	return crypto4xx_crypt_aes_gcm(req, true);
}
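/*
 * Editor's note (not driver code): for GCM with the standard 12-byte
 * IV, the initial counter block is defined as J0 = IV || 0x00000001.
 * That is what the iv[4] array above encodes: three words of IV plus a
 * final word holding 1, stored via cpu_to_le32() on the assumption
 * that the packet engine consumes little-endian words.
 */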
/**
 * HASH SHA1 Functions
 */

@@ -183,53 +564,39 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
				   unsigned char hm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
	struct crypto4xx_alg *my_alg;
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
	struct dynamic_sa_ctl *sa;
	struct dynamic_sa_hash160 *sa_in;
	struct dynamic_sa_hash160 *sa;
	int rc;

	my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
			      alg.u.hash);
	ctx->dev = my_alg->dev;
	ctx->is_hash = 1;
	ctx->hash_final = 0;

	/* Create SA */
	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
	if (ctx->sa_in || ctx->sa_out)
		crypto4xx_free_sa(ctx);

	rc = crypto4xx_alloc_sa(ctx, sa_len);
	if (rc)
		return rc;

	if (ctx->state_record_dma_addr == 0) {
		crypto4xx_alloc_state_record(ctx);
		if (!ctx->state_record_dma_addr) {
			crypto4xx_free_sa(ctx);
			return -ENOMEM;
		}
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct crypto4xx_ctx));
	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
	sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
	set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
				 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
				 SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
				 SA_OPCODE_HASH, DIR_INBOUND);
	set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
	set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
				 SA_NOT_COPY_HDR);
	ctx->direction = DIR_INBOUND;
	sa->sa_contents = SA_HASH160_CONTENTS;
	sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
	/* Need to zero hash digest in SA */
	memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
	memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
	sa_in->state_ptr = ctx->state_record_dma_addr;
	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
	memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
	memset(sa->outer_digest, 0, sizeof(sa->outer_digest));

	return 0;
}
@@ -240,29 +607,27 @@ int crypto4xx_hash_init(struct ahash_request *req)
	int ds;
	struct dynamic_sa_ctl *sa;

	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
	sa = ctx->sa_in;
	ds = crypto_ahash_digestsize(
			__crypto_ahash_cast(req->base.tfm));
	sa->sa_command_0.bf.digest_len = ds >> 2;
	sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
	ctx->is_hash = 1;
	ctx->direction = DIR_INBOUND;

	return 0;
}

int crypto4xx_hash_update(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct scatterlist dst;
	unsigned int ds = crypto_ahash_digestsize(ahash);

	ctx->is_hash = 1;
	ctx->hash_final = 0;
	ctx->pd_ctl = 0x11;
	ctx->direction = DIR_INBOUND;
	sg_init_one(&dst, req->result, ds);

	return crypto4xx_build_pd(&req->base, ctx, req->src,
				  (struct scatterlist *) req->result,
				  req->nbytes, NULL, 0);
	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
				  req->nbytes, NULL, 0, ctx->sa_in,
				  ctx->sa_len, 0);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -272,15 +637,16 @@ int crypto4xx_hash_final(struct ahash_request *req)

int crypto4xx_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct scatterlist dst;
	unsigned int ds = crypto_ahash_digestsize(ahash);

	ctx->hash_final = 1;
	ctx->pd_ctl = 0x11;
	ctx->direction = DIR_INBOUND;
	sg_init_one(&dst, req->result, ds);

	return crypto4xx_build_pd(&req->base, ctx, req->src,
				  (struct scatterlist *) req->result,
				  req->nbytes, NULL, 0);
	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
				  req->nbytes, NULL, 0, ctx->sa_in,
				  ctx->sa_len, 0);
}

/**
@@ -291,5 +657,3 @@ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
	return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
				       SA_HASH_MODE_HASH);
}

(File diff suppressed because it is too large)
@@ -22,7 +22,11 @@
#ifndef __CRYPTO4XX_CORE_H__
#define __CRYPTO4XX_CORE_H__

#include <linux/ratelimit.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"

#define MODULE_NAME "crypto4xx"

@@ -34,20 +38,28 @@
#define PPC405EX_CE_RESET		0x00000008

#define CRYPTO4XX_CRYPTO_PRIORITY	300
#define PPC4XX_LAST_PD			63
#define PPC4XX_NUM_PD			64
#define PPC4XX_LAST_GD			1023
#define PPC4XX_NUM_PD			256
#define PPC4XX_LAST_PD			(PPC4XX_NUM_PD - 1)
#define PPC4XX_NUM_GD			1024
#define PPC4XX_LAST_SD			63
#define PPC4XX_NUM_SD			64
#define PPC4XX_LAST_GD			(PPC4XX_NUM_GD - 1)
#define PPC4XX_NUM_SD			256
#define PPC4XX_LAST_SD			(PPC4XX_NUM_SD - 1)
#define PPC4XX_SD_BUFFER_SIZE		2048

#define PD_ENTRY_INUSE			1
#define PD_ENTRY_BUSY			BIT(1)
#define PD_ENTRY_INUSE			BIT(0)
#define PD_ENTRY_FREE			0
#define ERING_WAS_FULL			0xffffffff

struct crypto4xx_device;

union shadow_sa_buf {
	struct dynamic_sa_ctl sa;

	/* alloc 256 bytes which is enough for any kind of dynamic sa */
	u8 buf[256];
} __packed;

struct pd_uinfo {
	struct crypto4xx_device *dev;
	u32 state;
@@ -60,9 +72,8 @@ struct pd_uinfo {
				   used by this packet */
	u32 num_sd;		/* number of scatter descriptors
				   used by this packet */
	void *sa_va;		/* shadow sa, when using cp from ctx->sa */
	u32 sa_pa;
	void *sr_va;		/* state record for shadow sa */
	struct dynamic_sa_ctl *sa_va;	/* shadow sa */
	struct sa_state_record *sr_va;	/* state record for shadow sa */
	u32 sr_pa;
	struct scatterlist *dest_va;
	struct crypto_async_request *async_req;	/* base crypto request
@@ -72,27 +83,21 @@ struct pd_uinfo {
struct crypto4xx_device {
	struct crypto4xx_core_device *core_dev;
	char *name;
	u64 ce_phy_address;
	void __iomem *ce_base;
	void __iomem *trng_base;

	void *pdr;		/* base address of packet
				   descriptor ring */
	dma_addr_t pdr_pa;	/* physical address used to
				   program ce pdr_base_register */
	void *gdr;		/* gather descriptor ring */
	dma_addr_t gdr_pa;	/* physical address used to
				   program ce gdr_base_register */
	void *sdr;		/* scatter descriptor ring */
	dma_addr_t sdr_pa;	/* physical address used to
				   program ce sdr_base_register */
	struct ce_pd *pdr;	/* base address of packet descriptor ring */
	dma_addr_t pdr_pa;	/* physical address of pdr_base_register */
	struct ce_gd *gdr;	/* gather descriptor ring */
	dma_addr_t gdr_pa;	/* physical address of gdr_base_register */
	struct ce_sd *sdr;	/* scatter descriptor ring */
	dma_addr_t sdr_pa;	/* physical address of sdr_base_register */
	void *scatter_buffer_va;
	dma_addr_t scatter_buffer_pa;
	u32 scatter_buffer_size;

	void *shadow_sa_pool;	/* pool of memory for sa in pd_uinfo */
	union shadow_sa_buf *shadow_sa_pool;
	dma_addr_t shadow_sa_pool_pa;
	void *shadow_sr_pool;	/* pool of memory for sr in pd_uinfo */
	struct sa_state_record *shadow_sr_pool;
	dma_addr_t shadow_sr_pool_pa;
	u32 pdr_tail;
	u32 pdr_head;
@@ -100,9 +105,10 @@ struct crypto4xx_device {
	u32 gdr_head;
	u32 sdr_tail;
	u32 sdr_head;
	void *pdr_uinfo;
	struct pd_uinfo *pdr_uinfo;
	struct list_head alg_list;	/* List of algorithm supported
					   by this device */
	struct ratelimit_state aead_ratelimit;
};

struct crypto4xx_core_device {
@@ -118,30 +124,13 @@ struct crypto4xx_core_device {

struct crypto4xx_ctx {
	struct crypto4xx_device *dev;
	void *sa_in;
	dma_addr_t sa_in_dma_addr;
	void *sa_out;
	dma_addr_t sa_out_dma_addr;
	void *state_record;
	dma_addr_t state_record_dma_addr;
	struct dynamic_sa_ctl *sa_in;
	struct dynamic_sa_ctl *sa_out;
	__le32 iv_nonce;
	u32 sa_len;
	u32 offset_to_sr_ptr;	/* offset to state ptr, in dynamic sa */
	u32 direction;
	u32 next_hdr;
	u32 save_iv;
	u32 pd_ctl_len;
	u32 pd_ctl;
	u32 bypass;
	u32 is_hash;
	u32 hash_final;
};

struct crypto4xx_req_ctx {
	struct crypto4xx_device *dev;	/* Device in which
					   operation to send to */
	void *sa;
	u32 sa_dma_addr;
	u16 sa_len;
	union {
		struct crypto_aead *aead;
	} sw_cipher;
};

struct crypto4xx_alg_common {
@@ -149,6 +138,7 @@ struct crypto4xx_alg_common {
	union {
		struct crypto_alg cipher;
		struct ahash_alg hash;
		struct aead_alg aead;
	} u;
};
@@ -158,43 +148,90 @@ struct crypto4xx_alg {
	struct crypto4xx_device *dev;
};

static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
	struct crypto_alg *x)
{
	switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		return container_of(__crypto_ahash_alg(x),
				    struct crypto4xx_alg, alg.u.hash);
	}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
int crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       const unsigned int datalen,
		       const __le32 *iv, const u32 iv_len,
		       const struct dynamic_sa_ctl *sa,
		       const unsigned int sa_len,
		       const unsigned int assoclen);
int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_encrypt(struct ablkcipher_request *req);
int crypto4xx_decrypt(struct ablkcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
int crypto4xx_hash_digest(struct ahash_request *req);
int crypto4xx_hash_final(struct ahash_request *req);
int crypto4xx_hash_update(struct ahash_request *req);
int crypto4xx_hash_init(struct ahash_request *req);

	return container_of(x, struct crypto4xx_alg, alg.u.cipher);
/**
 * Note: Only use this function to copy items that are word aligned.
 */
static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
					   size_t len)
{
	for (; len >= 4; buf += 4, len -= 4)
		*dst++ = __swab32p((u32 *) buf);

	if (len) {
		const u8 *tmp = (u8 *)buf;

		switch (len) {
		case 3:
			*dst = (tmp[2] << 16) |
			       (tmp[1] << 8) |
			       tmp[0];
			break;
		case 2:
			*dst = (tmp[1] << 8) |
			       tmp[0];
			break;
		case 1:
			*dst = tmp[0];
			break;
		default:
			break;
		}
	}
}
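/*
 * Editor's worked example (not driver code): on the big-endian 4xx
 * target, copying the five bytes { 01, 02, 03, 04, 05 } through
 * crypto4xx_memcpy_swab32() yields
 *
 *	dst[0] = __swab32p(buf) = 0x04030201   (whole-word byte swap)
 *	dst[1] = tmp[0]         = 0x00000005   (1-byte tail, case 1)
 *
 * i.e. each 32-bit word is byte-reversed and any short tail is packed
 * into the low-order bytes of the final word.
 */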
extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
				   struct crypto4xx_ctx *rctx);
extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
extern void crypto4xx_memcpy_le(unsigned int *dst,
				const unsigned char *buf, int len);
extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
			      struct crypto4xx_ctx *ctx,
			      struct scatterlist *src,
			      struct scatterlist *dst,
			      unsigned int datalen,
			      void *iv, u32 iv_len);
extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
				    const u8 *key, unsigned int keylen);
extern int crypto4xx_encrypt(struct ablkcipher_request *req);
extern int crypto4xx_decrypt(struct ablkcipher_request *req);
extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
extern int crypto4xx_hash_digest(struct ahash_request *req);
extern int crypto4xx_hash_final(struct ahash_request *req);
extern int crypto4xx_hash_update(struct ahash_request *req);
extern int crypto4xx_hash_init(struct ahash_request *req);
static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
					      size_t len)
{
	crypto4xx_memcpy_swab32(dst, buf, len);
}

static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
					    size_t len)
{
	crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
}

int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
			       unsigned int authsize);
int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
int crypto4xx_decrypt_aes_gcm(struct aead_request *req);

#endif
@@ -261,6 +261,9 @@ union ce_pd_ctl {
	} bf;
	u32 w;
} __attribute__((packed));
#define PD_CTL_HASH_FINAL	BIT(4)
#define PD_CTL_PE_DONE		BIT(1)
#define PD_CTL_HOST_READY	BIT(0)

union ce_pd_ctl_len {
	struct {
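/*
 * Editor's note (not part of this patch): with these definitions the
 * magic pd_ctl value 0x11 used by the hash paths earlier decodes as
 * PD_CTL_HOST_READY | PD_CTL_HASH_FINAL (BIT(0) | BIT(4)), i.e. a
 * descriptor handed to the engine with the final-hash flag set.
 */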
@@ -1,85 +0,0 @@
/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * @file crypto4xx_sa.c
 *
 * This file implements the security context
 * associate format.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"

u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
{
	u32 offset;
	union dynamic_sa_contents cts;

	if (ctx->direction == DIR_INBOUND)
		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
	else
		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
	offset = cts.bf.key_size
		+ cts.bf.inner_size
		+ cts.bf.outer_size
		+ cts.bf.spi
		+ cts.bf.seq_num0
		+ cts.bf.seq_num1
		+ cts.bf.seq_num_mask0
		+ cts.bf.seq_num_mask1
		+ cts.bf.seq_num_mask2
		+ cts.bf.seq_num_mask3
		+ cts.bf.iv0
		+ cts.bf.iv1
		+ cts.bf.iv2
		+ cts.bf.iv3;

	return sizeof(struct dynamic_sa_ctl) + offset * 4;
}

u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
{
	union dynamic_sa_contents cts;

	if (ctx->direction == DIR_INBOUND)
		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
	else
		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
	return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
}

u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
{
	union dynamic_sa_contents cts;

	if (ctx->direction == DIR_INBOUND)
		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
	else
		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;

	return sizeof(struct dynamic_sa_ctl);
}
@@ -55,6 +55,8 @@ union dynamic_sa_contents {
#define SA_OP_GROUP_BASIC		0
#define SA_OPCODE_ENCRYPT		0
#define SA_OPCODE_DECRYPT		0
#define SA_OPCODE_ENCRYPT_HASH		1
#define SA_OPCODE_HASH_DECRYPT		1
#define SA_OPCODE_HASH			3
#define SA_CIPHER_ALG_DES		0
#define SA_CIPHER_ALG_3DES		1
@@ -65,6 +67,8 @@ union dynamic_sa_contents {

#define SA_HASH_ALG_MD5			0
#define SA_HASH_ALG_SHA1		1
#define SA_HASH_ALG_GHASH		12
#define SA_HASH_ALG_CBC_MAC		14
#define SA_HASH_ALG_NULL		15
#define SA_HASH_ALG_SHA1_DIGEST_SIZE	20

@@ -112,6 +116,9 @@ union sa_command_0 {

#define CRYPTO_MODE_ECB			0
#define CRYPTO_MODE_CBC			1
#define CRYPTO_MODE_OFB			2
#define CRYPTO_MODE_CFB			3
#define CRYPTO_MODE_CTR			4

#define CRYPTO_FEEDBACK_MODE_NO_FB	0
#define CRYPTO_FEEDBACK_MODE_64BIT_OFB	0
@@ -169,7 +176,7 @@ union sa_command_1 {
} __attribute__((packed));

struct dynamic_sa_ctl {
	u32 sa_contents;
	union dynamic_sa_contents sa_contents;
	union sa_command_0 sa_command_0;
	union sa_command_1 sa_command_1;
} __attribute__((packed));
@@ -178,9 +185,12 @@ struct dynamic_sa_ctl {
 * State Record for Security Association (SA)
 */
struct sa_state_record {
	u32 save_iv[4];
	u32 save_hash_byte_cnt[2];
	u32 save_digest[16];
	__le32 save_iv[4];
	__le32 save_hash_byte_cnt[2];
	union {
		u32 save_digest[16]; /* for MD5/SHA */
		__le32 save_digest_le32[16]; /* GHASH / CBC */
	};
} __attribute__((packed));

/**
@@ -189,8 +199,8 @@ struct sa_state_record {
 */
struct dynamic_sa_aes128 {
	struct dynamic_sa_ctl ctrl;
	u32 key[4];
	u32 iv[4]; /* for CBC, OFB, and CFB mode */
	__le32 key[4];
	__le32 iv[4]; /* for CBC, OFB, and CFB mode */
	u32 state_ptr;
	u32 reserved;
} __attribute__((packed));
@@ -203,8 +213,8 @@ struct dynamic_sa_aes128 {
 */
struct dynamic_sa_aes192 {
	struct dynamic_sa_ctl ctrl;
	u32 key[6];
	u32 iv[4]; /* for CBC, OFB, and CFB mode */
	__le32 key[6];
	__le32 iv[4]; /* for CBC, OFB, and CFB mode */
	u32 state_ptr;
	u32 reserved;
} __attribute__((packed));
@@ -217,8 +227,8 @@ struct dynamic_sa_aes192 {
 */
struct dynamic_sa_aes256 {
	struct dynamic_sa_ctl ctrl;
	u32 key[8];
	u32 iv[4]; /* for CBC, OFB, and CFB mode */
	__le32 key[8];
	__le32 iv[4]; /* for CBC, OFB, and CFB mode */
	u32 state_ptr;
	u32 reserved;
} __attribute__((packed));
@@ -227,17 +237,82 @@ struct dynamic_sa_aes256 {
#define SA_AES256_CONTENTS	0x3e000082
#define SA_AES_CONTENTS		0x3e000002

/**
 * Security Association (SA) for AES128 CCM
 */
struct dynamic_sa_aes128_ccm {
	struct dynamic_sa_ctl ctrl;
	__le32 key[4];
	__le32 iv[4];
	u32 state_ptr;
	u32 reserved;
} __packed;
#define SA_AES128_CCM_LEN	(sizeof(struct dynamic_sa_aes128_ccm)/4)
#define SA_AES128_CCM_CONTENTS	0x3e000042
#define SA_AES_CCM_CONTENTS	0x3e000002

/**
 * Security Association (SA) for AES128_GCM
 */
struct dynamic_sa_aes128_gcm {
	struct dynamic_sa_ctl ctrl;
	__le32 key[4];
	__le32 inner_digest[4];
	__le32 iv[4];
	u32 state_ptr;
	u32 reserved;
} __packed;

#define SA_AES128_GCM_LEN	(sizeof(struct dynamic_sa_aes128_gcm)/4)
#define SA_AES128_GCM_CONTENTS	0x3e000442
#define SA_AES_GCM_CONTENTS	0x3e000402

/**
 * Security Association (SA) for HASH160: HMAC-SHA1
 */
struct dynamic_sa_hash160 {
	struct dynamic_sa_ctl ctrl;
	u32 inner_digest[5];
	u32 outer_digest[5];
	__le32 inner_digest[5];
	__le32 outer_digest[5];
	u32 state_ptr;
	u32 reserved;
} __attribute__((packed));
#define SA_HASH160_LEN		(sizeof(struct dynamic_sa_hash160)/4)
#define SA_HASH160_CONTENTS	0x2000a502

static inline u32
get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts)
{
	u32 offset;

	offset = cts->sa_contents.bf.key_size
		+ cts->sa_contents.bf.inner_size
		+ cts->sa_contents.bf.outer_size
		+ cts->sa_contents.bf.spi
		+ cts->sa_contents.bf.seq_num0
		+ cts->sa_contents.bf.seq_num1
		+ cts->sa_contents.bf.seq_num_mask0
		+ cts->sa_contents.bf.seq_num_mask1
		+ cts->sa_contents.bf.seq_num_mask2
		+ cts->sa_contents.bf.seq_num_mask3
		+ cts->sa_contents.bf.iv0
		+ cts->sa_contents.bf.iv1
		+ cts->sa_contents.bf.iv2
		+ cts->sa_contents.bf.iv3;

	return sizeof(struct dynamic_sa_ctl) + offset * 4;
}
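/*
 * Editor's worked example (assumption, not driver code): for the plain
 * AES128 SA laid out earlier (ctrl, key[4], iv[4], state_ptr), the
 * contents word would report key_size = 4 and one word each for
 * iv0..iv3, so the helper returns
 *
 *	sizeof(struct dynamic_sa_ctl) + (4 + 4) * 4
 *
 * which is exactly the byte offset of the state_ptr member.
 */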
static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
{
	return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
}

static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts)
{
	return (__le32 *) ((unsigned long)cts +
		sizeof(struct dynamic_sa_ctl) +
		cts->sa_contents.bf.key_size * 4);
}

#endif
@@ -36,6 +36,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
@@ -76,12 +77,11 @@
			 AES_FLAGS_ENCRYPT | \
			 AES_FLAGS_GTAGEN)

#define AES_FLAGS_INIT		BIT(2)
#define AES_FLAGS_BUSY		BIT(3)
#define AES_FLAGS_DUMP_REG	BIT(4)
#define AES_FLAGS_OWN_SHA	BIT(5)

#define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)
#define AES_FLAGS_PERSISTENT	AES_FLAGS_BUSY

#define ATMEL_AES_QUEUE_LENGTH	50

@@ -110,6 +110,7 @@ struct atmel_aes_base_ctx {
	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	u16		block_size;
	bool		is_aead;
};

struct atmel_aes_ctx {
@@ -156,6 +157,7 @@ struct atmel_aes_authenc_ctx {

struct atmel_aes_reqctx {
	unsigned long	mode;
	u32		lastc[AES_BLOCK_SIZE / sizeof(u32)];
};

#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
@@ -448,11 +450,8 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
	}
	atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
	atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);

	return 0;
}
@@ -497,12 +496,34 @@ static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
	atmel_aes_authenc_complete(dd, err);
	if (dd->ctx->is_aead)
		atmel_aes_authenc_complete(dd, err);
#endif

	clk_disable(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	if (!dd->ctx->is_aead) {
		struct ablkcipher_request *req =
			ablkcipher_request_cast(dd->areq);
		struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
		struct crypto_ablkcipher *ablkcipher =
			crypto_ablkcipher_reqtfm(req);
		int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

		if (rctx->mode & AES_FLAGS_ENCRYPT) {
			scatterwalk_map_and_copy(req->info, req->dst,
				req->nbytes - ivsize, ivsize, 0);
		} else {
			if (req->src == req->dst) {
				memcpy(req->info, rctx->lastc, ivsize);
			} else {
				scatterwalk_map_and_copy(req->info, req->src,
					req->nbytes - ivsize, ivsize, 0);
			}
		}
	}

	if (dd->is_async)
		dd->areq->complete(dd->areq, err);
@@ -1071,11 +1092,11 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)

static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct atmel_aes_reqctx *rctx;
	struct atmel_aes_dev *dd;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	switch (mode & AES_FLAGS_OPMODE_MASK) {
	case AES_FLAGS_CFB8:
		ctx->block_size = CFB8_BLOCK_SIZE;
@@ -1097,6 +1118,7 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
		ctx->block_size = AES_BLOCK_SIZE;
		break;
	}
	ctx->is_aead = false;

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
@@ -1105,6 +1127,13 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
		int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

		scatterwalk_map_and_copy(rctx->lastc, req->src,
			(req->nbytes - ivsize), ivsize, 0);
	}

	return atmel_aes_handle_queue(dd, &req->base);
}
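/*
 * Editor's note (not part of this patch): the two hunks above work
 * together to report a correct output IV for chaining modes. On
 * encryption the next IV is the last ciphertext block, copied out of
 * req->dst on completion; on decryption it is the last ciphertext
 * block of the *input*, which an in-place operation overwrites, so it
 * is saved into rctx->lastc before the request is queued and restored
 * from there in atmel_aes_complete().
 */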
@@ -1236,10 +1265,6 @@ static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
@@ -1252,7 +1277,6 @@ static struct crypto_alg aes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1272,7 +1296,6 @@ static struct crypto_alg aes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1293,7 +1316,6 @@ static struct crypto_alg aes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1314,7 +1336,6 @@ static struct crypto_alg aes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1335,7 +1356,6 @@ static struct crypto_alg aes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1356,7 +1376,6 @@ static struct crypto_alg aes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1377,7 +1396,6 @@ static struct crypto_alg aes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1398,7 +1416,6 @@ static struct crypto_alg aes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_ctr_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1421,7 +1438,6 @@ static struct crypto_alg aes_cfb64_alg = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1532,7 +1548,7 @@ static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
	if (err)
		return atmel_aes_complete(dd, err);

	if (likely(ivsize == 12)) {
	if (likely(ivsize == GCM_AES_IV_SIZE)) {
		memcpy(ctx->j0, iv, ivsize);
		ctx->j0[3] = cpu_to_be32(1);
		return atmel_aes_gcm_process(dd);
@@ -1739,6 +1755,7 @@ static int atmel_aes_gcm_crypt(struct aead_request *req,

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	ctx->block_size = AES_BLOCK_SIZE;
	ctx->is_aead = true;

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
@@ -1808,19 +1825,13 @@ static int atmel_aes_gcm_init(struct crypto_aead *tfm)
	return 0;
}

static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
{

}

static struct aead_alg aes_gcm_alg = {
	.setkey		= atmel_aes_gcm_setkey,
	.setauthsize	= atmel_aes_gcm_setauthsize,
	.encrypt	= atmel_aes_gcm_encrypt,
	.decrypt	= atmel_aes_gcm_decrypt,
	.init		= atmel_aes_gcm_init,
	.exit		= atmel_aes_gcm_exit,
	.ivsize		= 12,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
@@ -1955,7 +1966,6 @@ static struct crypto_alg aes_xts_alg = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_xts_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
@@ -2223,6 +2233,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,

	rctx->base.mode = mode;
	ctx->block_size = AES_BLOCK_SIZE;
	ctx->is_aead = true;

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
@@ -2382,7 +2393,6 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
			      struct crypto_platform_data *pdata)
{
	struct at_dma_slave *slave;
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
@@ -2407,7 +2417,7 @@ err_dma_out:
	dma_release_channel(dd->src.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
	return -ENODEV;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
@@ -2658,8 +2668,6 @@ static int atmel_aes_probe(struct platform_device *pdev)

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
@@ -2628,7 +2628,6 @@ static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
			      struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
@@ -2639,7 +2638,7 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
		atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return err;
		return -ENODEV;
	}

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
@@ -2778,8 +2777,6 @@ static int atmel_sha_probe(struct platform_device *pdev)

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	sha_dd->irq = -1;

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
@@ -720,7 +720,6 @@ static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
			       struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
@@ -765,7 +764,7 @@ err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
	return -ENODEV;
}

static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -912,10 +911,6 @@ static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
	return 0;
}

static void atmel_tdes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg tdes_algs[] = {
{
	.cra_name		= "ecb(des)",
@@ -928,7 +923,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
@@ -948,7 +942,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
@@ -969,7 +962,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
@@ -990,7 +982,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
@@ -1011,7 +1002,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
@@ -1032,7 +1022,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
@@ -1053,7 +1042,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
@@ -1074,7 +1062,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= 2 * DES_KEY_SIZE,
		.max_keysize	= 3 * DES_KEY_SIZE,
@@ -1094,7 +1081,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= 2*DES_KEY_SIZE,
		.max_keysize	= 3*DES_KEY_SIZE,
@@ -1115,7 +1101,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= 2*DES_KEY_SIZE,
		.max_keysize	= 2*DES_KEY_SIZE,
@@ -1136,7 +1121,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= 2*DES_KEY_SIZE,
		.max_keysize	= 2*DES_KEY_SIZE,
@@ -1157,7 +1141,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= 2*DES_KEY_SIZE,
		.max_keysize	= 2*DES_KEY_SIZE,
@@ -1178,7 +1161,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= 2*DES_KEY_SIZE,
		.max_keysize	= 2*DES_KEY_SIZE,
@@ -1199,7 +1181,6 @@ static struct crypto_alg tdes_algs[] = {
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_tdes_cra_init,
	.cra_exit		= atmel_tdes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= 2*DES_KEY_SIZE,
		.max_keysize	= 3*DES_KEY_SIZE,
@@ -1382,8 +1363,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)

	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);

	tdes_dd->irq = -1;

	/* Get the base address */
	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!tdes_res) {
@@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
	return 0;
}

static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
				u8 chan_idx)
{
	int err;
	int retry_cnt = 0;
	struct device *dev = &(iproc_priv.pdev->dev);

	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	/* Check error returned by mailbox controller */
	err = mssg->error;
	if (unlikely(err < 0)) {
		dev_err(dev, "message error %d", err);
		/* Signal txdone for mailbox channel */
	}

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
	return err;
}
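/*
 * Editor's note (not part of this patch): with this helper, callers
 * submit a mailbox message with a single call, e.g.
 *
 *	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
 *	if (unlikely(err < 0))
 *		return err;
 *
 * instead of open-coding the -ENOBUFS retry loop three times. Because
 * the helper ends with mbox_client_txdone(), it assumes the mailbox
 * client is registered with knows_txdone = true, which a later hunk in
 * this patch sets in spu_mb_init().
 */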
/**
 * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
 * a single SPU request message, starting at the current position in the request
@@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
	u32 pad_len;		/* total length of all padding */
	bool update_key = false;
	struct brcm_message *mssg;	/* mailbox message */
	int retry_cnt = 0;

	/* number of entries in src and dst sg in mailbox message. */
	u8 rx_frag_num = 2;	/* response header and STATUS */
@@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
	if (err)
		return err;

	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (unlikely(err < 0)) {
		atomic_inc(&iproc_priv.mb_send_fail);
	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;
	}

	return -EINPROGRESS;
}
@@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
	u32 spu_hdr_len;
	unsigned int digestsize;
	u16 rem = 0;
	int retry_cnt = 0;

	/*
	 * number of entries in src and dst sg. Always includes SPU msg header.
@@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
	if (err)
		return err;

	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;
	}

	return -EINPROGRESS;
}

@@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
	int assoc_nents = 0;
	bool incl_icv = false;
	unsigned int digestsize = ctx->digestsize;
	int retry_cnt = 0;

	/* number of entries in src and dst sg. Always includes SPU msg header.
	 */
@@ -1367,11 +1373,11 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
		 * expects AAD to include just SPI and seqno. So
		 * subtract off the IV len.
		 */
		aead_parms.assoc_size -= GCM_ESP_IV_SIZE;
		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;

		if (rctx->is_encrypt) {
			aead_parms.return_iv = true;
			aead_parms.ret_iv_len = GCM_ESP_IV_SIZE;
			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
		}
	} else {
@@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
	if (err)
		return err;

	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;
	}

	return -EINPROGRESS;
}
@@ -3255,7 +3246,7 @@ static struct iproc_alg_s driver_algs[] = {
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
		},
		.setkey = aead_gcm_esp_setkey,
		.ivsize = GCM_ESP_IV_SIZE,
		.ivsize = GCM_RFC4106_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
	},
	.cipher_info = {
@@ -3301,7 +3292,7 @@ static struct iproc_alg_s driver_algs[] = {
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
		},
		.setkey = rfc4543_gcm_esp_setkey,
		.ivsize = GCM_ESP_IV_SIZE,
		.ivsize = GCM_RFC4106_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
	},
	.cipher_info = {
@@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev)
	mcl->dev = dev;
	mcl->tx_block = false;
	mcl->tx_tout = 0;
	mcl->knows_txdone = false;
	mcl->knows_txdone = true;
	mcl->rx_callback = spu_rx_callback;
	mcl->tx_done = NULL;

@@ -4818,7 +4809,6 @@ static int spu_dt_read(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	struct resource *spu_ctrl_regs;
	const struct of_device_id *match;
	const struct spu_type_subtype *matched_spu_type;
	struct device_node *dn = pdev->dev.of_node;
	int err, i;
@@ -4826,14 +4816,12 @@ static int spu_dt_read(struct platform_device *pdev)
	/* Count number of mailbox channels */
	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");

	match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
	if (!match) {
	matched_spu_type = of_device_get_match_data(dev);
	if (!matched_spu_type) {
		dev_err(&pdev->dev, "Failed to match device\n");
		return -ENODEV;
	}

	matched_spu_type = match->data;

	spu->spu_type = matched_spu_type->type;
	spu->spu_subtype = matched_spu_type->subtype;
@@ -23,6 +23,7 @@
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/aead.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>

@@ -39,8 +40,6 @@
#define ARC4_STATE_SIZE     4

#define CCM_AES_IV_SIZE     16
#define GCM_AES_IV_SIZE     12
#define GCM_ESP_IV_SIZE     8
#define CCM_ESP_IV_SIZE     8
#define RFC4543_ICV_SIZE    16
@ -271,7 +271,7 @@ int do_shash(unsigned char *name, unsigned char *result,
|
||||
hash = crypto_alloc_shash(name, 0, 0);
|
||||
if (IS_ERR(hash)) {
|
||||
rc = PTR_ERR(hash);
|
||||
pr_err("%s: Crypto %s allocation error %d", __func__, name, rc);
|
||||
pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -279,7 +279,7 @@ int do_shash(unsigned char *name, unsigned char *result,
|
||||
sdesc = kmalloc(size, GFP_KERNEL);
|
||||
if (!sdesc) {
|
||||
rc = -ENOMEM;
|
||||
pr_err("%s: Memory allocation failure", __func__);
|
||||
pr_err("%s: Memory allocation failure\n", __func__);
|
||||
goto do_shash_err;
|
||||
}
|
||||
sdesc->shash.tfm = hash;
|
||||
@ -288,31 +288,31 @@ int do_shash(unsigned char *name, unsigned char *result,
|
||||
if (key_len > 0) {
|
||||
rc = crypto_shash_setkey(hash, key, key_len);
|
||||
if (rc) {
|
||||
pr_err("%s: Could not setkey %s shash", __func__, name);
|
||||
pr_err("%s: Could not setkey %s shash\n", __func__, name);
|
||||
goto do_shash_err;
|
||||
}
|
||||
}
|
||||
|
||||
rc = crypto_shash_init(&sdesc->shash);
|
||||
if (rc) {
|
||||
pr_err("%s: Could not init %s shash", __func__, name);
|
||||
pr_err("%s: Could not init %s shash\n", __func__, name);
|
||||
goto do_shash_err;
|
||||
}
|
||||
rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
|
||||
if (rc) {
|
||||
pr_err("%s: Could not update1", __func__);
|
||||
pr_err("%s: Could not update1\n", __func__);
|
||||
goto do_shash_err;
|
||||
}
|
||||
if (data2 && data2_len) {
|
||||
rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
|
||||
if (rc) {
|
||||
pr_err("%s: Could not update2", __func__);
|
||||
pr_err("%s: Could not update2\n", __func__);
|
||||
goto do_shash_err;
|
||||
}
|
||||
}
|
||||
rc = crypto_shash_final(&sdesc->shash, result);
|
||||
if (rc)
|
||||
pr_err("%s: Could not generate %s hash", __func__, name);
|
||||
pr_err("%s: Could not generate %s hash\n", __func__, name);
|
||||
|
||||
do_shash_err:
|
||||
crypto_free_shash(hash);
|
||||
|
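
do_shash() above drives the shash API one step at a time (setkey, init, update, final), now with properly newline-terminated log messages. For a single contiguous buffer the same work can be done in one call; a hedged sketch using crypto_shash_digest() and the stack descriptor helper (the algorithm name and helper name are arbitrary):

    #include <crypto/hash.h>

    static int my_sha256_oneshot(const u8 *data, unsigned int len, u8 *out)
    {
    	struct crypto_shash *tfm;
    	int rc;

    	tfm = crypto_alloc_shash("sha256", 0, 0);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	{
    		/* descriptor lives on the stack, sized for this tfm */
    		SHASH_DESC_ON_STACK(desc, tfm);

    		desc->tfm = tfm;
    		desc->flags = 0;	/* no CRYPTO_TFM_REQ_MAY_SLEEP */
    		rc = crypto_shash_digest(desc, data, len, out);
    	}

    	crypto_free_shash(tfm);
    	return rc;
    }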
@@ -992,7 +992,7 @@ static void init_gcm_job(struct aead_request *req,
struct caam_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc = edesc->hw_desc;
bool generic_gcm = (ivsize == 12);
bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
unsigned int last;

init_aead_job(req, edesc, all_contig, encrypt);
@@ -1004,7 +1004,7 @@ static void init_gcm_job(struct aead_request *req,

/* Read GCM IV */
append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
/* Append Salt */
if (!generic_gcm)
append_data(desc, ctx->key + ctx->cdata.keylen, 4);
@@ -1953,7 +1953,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = rfc4106_setauthsize,
.encrypt = ipsec_gcm_encrypt,
.decrypt = ipsec_gcm_decrypt,
.ivsize = 8,
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
@@ -1971,7 +1971,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = rfc4543_setauthsize,
.encrypt = ipsec_gcm_encrypt,
.decrypt = ipsec_gcm_decrypt,
.ivsize = 8,
.ivsize = GCM_RFC4543_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
@@ -1990,7 +1990,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = gcm_setauthsize,
.encrypt = gcm_encrypt,
.decrypt = gcm_decrypt,
.ivsize = 12,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
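
All of these replacements draw on <crypto/gcm.h>, which gives the GCM nonce sizes symbolic names so the IV lengths stop being magic numbers scattered across drivers. For reference, the constants involved are:

    /* from include/crypto/gcm.h */
    #define GCM_AES_IV_SIZE		12	/* plain gcm(aes): full 96-bit nonce in the request IV */
    #define GCM_RFC4106_IV_SIZE	8	/* rfc4106: 8-byte explicit IV, 4-byte salt taken from the key */
    #define GCM_RFC4543_IV_SIZE	8	/* rfc4543 (GMAC): same split as rfc4106 */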
@@ -7,7 +7,7 @@
*/

#include "compat.h"

#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
@@ -2312,6 +2312,11 @@ static int __init caam_qi_algapi_init(void)
if (!priv || !priv->qi_present)
return -ENODEV;

if (caam_dpaa2) {
dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
return -ENODEV;
}

INIT_LIST_HEAD(&alg_list);

/*
@@ -218,7 +218,7 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
@@ -773,7 +773,7 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;

ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -871,9 +871,8 @@ static int ahash_final_ctx(struct ahash_request *req)
desc = edesc->hw_desc;

edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->src_nents = 0;

ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
@@ -967,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req)

edesc->src_nents = src_nents;

ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
@@ -1123,7 +1122,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
dev_err(jrdev, "unable to map dst\n");
goto unmap;
}
edesc->src_nents = 0;

#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1205,7 +1203,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)

edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->dst_dma = 0;

ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
if (ret)
@@ -1417,7 +1414,6 @@ static int ahash_update_first(struct ahash_request *req)
}

edesc->src_nents = src_nents;
edesc->dst_dma = 0;

ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
to_hash);
@@ -32,6 +32,7 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
@@ -1440,7 +1440,7 @@
#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
@@ -127,7 +127,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
* size and interrupt threshold.
*/
offset = NPS_PKT_IN_INSTR_BADDRX(i);
nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BADDRX(i), cmdq->dma);
nitrox_write_csr(ndev, offset, cmdq->dma);

/* configure ring size */
offset = NPS_PKT_IN_INSTR_RSIZEX(i);
@@ -19,13 +19,12 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/delay.h>

#include "ccp-crypto.h"

#define AES_GCM_IVSIZE 12

static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
{
return ret;
@@ -95,9 +94,9 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
*/

/* Prepare the IV: 12 bytes + an integer (counter) */
memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
for (i = 0; i < 3; i++)
rctx->iv[i + AES_GCM_IVSIZE] = 0;
rctx->iv[i + GCM_AES_IV_SIZE] = 0;
rctx->iv[AES_BLOCK_SIZE - 1] = 1;

/* Set up a scatterlist for the IV */
@@ -160,7 +159,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
.encrypt = ccp_aes_gcm_encrypt,
.decrypt = ccp_aes_gcm_decrypt,
.init = ccp_aes_gcm_cra_init,
.ivsize = AES_GCM_IVSIZE,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
@@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)

/* Check if the cmd can/should be queued */
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
ret = -EBUSY;
if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
ret = -ENOSPC;
goto e_lock;
}
}

/* Look for an entry with the same tfm. If there is a cmd
@@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
goto e_lock; /* Error, don't queue it */
if ((ret == -EBUSY) &&
!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
goto e_lock; /* Not backlogging, don't queue it */
}

if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
@@ -788,13 +788,12 @@ static int ccp5_init(struct ccp_device *ccp)
struct ccp_cmd_queue *cmd_q;
struct dma_pool *dma_pool;
char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
unsigned int qmr, qim, i;
unsigned int qmr, i;
u64 status;
u32 status_lo, status_hi;
int ret;

/* Find available queues */
qim = 0;
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
for (i = 0; i < MAX_HW_QUEUES; i++) {

@@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd)
i = ccp->cmd_q_count;

if (ccp->cmd_count >= MAX_CMD_QLEN) {
ret = -EBUSY;
if (cmd->flags & CCP_CMD_MAY_BACKLOG)
if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
ret = -EBUSY;
list_add_tail(&cmd->entry, &ccp->backlog);
} else {
ret = -ENOSPC;
}
} else {
ret = -EINPROGRESS;
ccp->cmd_count++;
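
Taken together, the ccp hunks implement the reworked queueing contract: -EINPROGRESS means the command was queued for execution, -EBUSY now strictly means it was accepted onto the backlog (only possible when CCP_CMD_MAY_BACKLOG is set), and a full queue without backlogging is reported as -ENOSPC. A hedged sketch of what a submitter does under this contract, with a hypothetical completion-based wrapper:

    #include <linux/ccp.h>
    #include <linux/completion.h>

    static int my_ccp_run(struct ccp_cmd *cmd, struct completion *done)
    {
    	int ret = ccp_enqueue_cmd(cmd);

    	switch (ret) {
    	case -EINPROGRESS:	/* queued; completes asynchronously */
    	case -EBUSY:		/* backlogged; runs once the queue drains */
    		wait_for_completion(done);	/* assumes cmd->callback signals it */
    		return 0;
    	case -ENOSPC:		/* queue full and CCP_CMD_MAY_BACKLOG not set */
    	default:
    		return ret;
    	}
    }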
@@ -223,6 +223,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
desc->tx_desc.cookie, desc->status);

dma_cookie_complete(tx_desc);
dma_descriptor_unmap(tx_desc);
}

desc = __ccp_next_dma_desc(chan, desc);
@@ -230,9 +231,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
spin_unlock_irqrestore(&chan->lock, flags);

if (tx_desc) {
if (tx_desc->callback &&
(tx_desc->flags & DMA_PREP_INTERRUPT))
tx_desc->callback(tx_desc->callback_param);
dmaengine_desc_get_callback_invoke(tx_desc, NULL);

dma_run_dependencies(tx_desc);
}

[diff for one file suppressed by the viewer because it is too large]
@@ -176,21 +176,21 @@
KEY_CONTEXT_SALT_PRESENT_V(1) | \
KEY_CONTEXT_CTX_LEN_V((ctx_len)))

#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \
#define FILL_WR_OP_CCTX_SIZE \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
FW_CRYPTO_LOOKASIDE_WR) | \
FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \
FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((0)) | \
FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(0) | \
FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(0))

#define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \
#define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \
FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \
FW_CRYPTO_LOOKASIDE_WR_IV_V((IV_NOP)) | \
FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid))

#define FILL_ULPTX_CMD_DEST(cid, qid) \
@@ -214,27 +214,22 @@
calc_tx_flits_ofld(skb) * 8), 16)))

#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1))

ULP_TX_SC_MORE_V((immdatalen)))
#define MAX_NK 8
#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
#define MAX_WR_SIZE 512
#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0)
#define MAX_DSGL_ENT 32
#define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 2)
#define MIN_CIPHER_SG 1 /* IV */
#define MIN_AUTH_SG 2 /*IV + AAD*/
#define MIN_GCM_SG 2 /* IV + AAD*/
#define MIN_AUTH_SG 1 /* IV */
#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
#define MIN_CCM_SG 3 /*IV+AAD+B0*/
#define MIN_CCM_SG 2 /*IV+B0*/
#define SPACE_LEFT(len) \
((MAX_WR_SIZE - WR_MIN_LEN - (len)))
((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))

unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40,
48, 64, 72, 88,
96, 112, 120, 136,
144, 160, 168, 184,
192};
unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
96, 112, 120, 136, 144, 160, 168, 184,
192, 208, 216, 232, 240, 256, 264, 280,
288, 304, 312, 328, 336, 352, 360, 376};
unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
112, 112, 128, 128, 144, 144, 160, 160,
192, 192, 208, 208, 224, 224, 240, 240,
@@ -258,10 +253,8 @@ struct hash_wr_param {

struct cipher_wr_param {
struct ablkcipher_request *req;
struct scatterlist *srcsg;
char *iv;
int bytes;
short int snent;
unsigned short qid;
};
enum {
@@ -299,31 +292,11 @@ enum {
ICV_16 = 16
};

struct hash_op_params {
unsigned char mk_size;
unsigned char pad_align;
unsigned char auth_mode;
char hash_name[MAX_HASH_NAME];
unsigned short block_size;
unsigned short word_size;
unsigned short ipad_size;
};

struct phys_sge_pairs {
__be16 len[8];
__be64 addr[8];
};

struct phys_sge_parm {
unsigned int nents;
unsigned int obsize;
unsigned short qid;
};

struct crypto_result {
struct completion completion;
int err;
};

static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
@@ -153,16 +153,16 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
struct uld_ctx *u_ctx;

/* Create the device and add it in the device list */
if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
return ERR_PTR(-EOPNOTSUPP);

/* Create the device and add it in the device list */
u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
if (!u_ctx) {
u_ctx = ERR_PTR(-ENOMEM);
goto out;
}
if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
u_ctx = ERR_PTR(-ENOMEM);
goto out;
}
u_ctx->lldi = *lld;
out:
return u_ctx;
@@ -224,7 +224,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
pr_err("ULD register fail: No chcr crypto support in cxgb4");
pr_err("ULD register fail: No chcr crypto support in cxgb4\n");

return 0;
}

@@ -89,7 +89,7 @@ struct uld_ctx {
struct chcr_dev *dev;
};

struct uld_ctx * assign_chcr_device(void);
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
@@ -149,9 +149,23 @@

#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
#define CHCR_SG_SIZE 2048
#define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int))
#define CHCR_DST_SG_SIZE 2048

/* Aligned to 128 bit boundary */
static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
{
return crypto_aead_ctx(tfm);
}

static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
{
return crypto_ablkcipher_ctx(tfm);
}

static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
{
return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
}

struct ablk_ctx {
struct crypto_skcipher *sw_cipher;
@@ -165,16 +179,39 @@ struct ablk_ctx {
};
struct chcr_aead_reqctx {
struct sk_buff *skb;
struct scatterlist *dst;
struct scatterlist *newdstsg;
struct scatterlist srcffwd[2];
struct scatterlist dstffwd[2];
dma_addr_t iv_dma;
dma_addr_t b0_dma;
unsigned int b0_len;
unsigned int op;
short int aad_nents;
short int src_nents;
short int dst_nents;
u16 imm;
u16 verify;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
};

struct ulptx_walk {
struct ulptx_sgl *sgl;
unsigned int nents;
unsigned int pair_idx;
unsigned int last_sg_len;
struct scatterlist *last_sg;
struct ulptx_sge_pair *pair;

};

struct dsgl_walk {
unsigned int nents;
unsigned int last_sg_len;
struct scatterlist *last_sg;
struct cpl_rx_phys_dsgl *dsgl;
struct phys_sge_pairs *to;
};


struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE];
};
@@ -195,7 +232,6 @@ struct __aead_ctx {
struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
struct crypto_skcipher *null;
struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
@@ -231,8 +267,11 @@ struct chcr_ahash_req_ctx {
u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 *reqbfr;
u8 *skbfr;
dma_addr_t dma_addr;
u32 dma_len;
u8 reqlen;
/* DMA the partial hash in it */
u8 imm;
u8 is_sg_map;
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */
/* SKB which is being sent to the hardware for processing */
@@ -241,14 +280,15 @@ struct chcr_ahash_req_ctx {

struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
struct scatterlist srcffwd[2];
struct scatterlist dstffwd[2];
struct scatterlist *dstsg;
struct scatterlist *dst;
struct scatterlist *newdstsg;
unsigned int processed;
unsigned int last_req_len;
struct scatterlist *srcsg;
unsigned int src_ofst;
unsigned int dst_ofst;
unsigned int op;
short int dst_nents;
dma_addr_t iv_dma;
u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};

@@ -262,24 +302,6 @@ struct chcr_alg_template {
} alg;
};

struct chcr_req_ctx {
union {
struct ahash_request *ahash_req;
struct aead_request *aead_req;
struct ablkcipher_request *ablk_req;
} req;
union {
struct chcr_ahash_req_ctx *ahash_ctx;
struct chcr_aead_reqctx *reqctx;
struct chcr_blkcipher_req_ctx *ablk_ctx;
} ctx;
};

struct sge_opaque_hdr {
void *dev;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
int size,
@@ -290,10 +312,39 @@ static int chcr_aead_op(struct aead_request *req_base,
int size,
create_wr_t create_wr_fn);
static inline int get_aead_subtype(struct crypto_aead *aead);
static int is_newsg(struct scatterlist *sgl, unsigned int *newents);
static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
unsigned int nents);
static inline void free_new_sg(struct scatterlist *sgl);
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err);
static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
unsigned short op_type);
static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
*req, unsigned short op_type);
static inline void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
unsigned int assoclen,
unsigned short op_type,
unsigned short qid);
static inline void chcr_add_aead_src_ent(struct aead_request *req,
struct ulptx_sgl *ulptx,
unsigned int assoclen,
unsigned short op_type);
static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
struct ulptx_sgl *ulptx,
struct cipher_wr_param *wrparam);
static int chcr_cipher_dma_map(struct device *dev,
struct ablkcipher_request *req);
static void chcr_cipher_dma_unmap(struct device *dev,
struct ablkcipher_request *req);
static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid);
int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
static inline void chcr_add_hash_src_ent(struct ahash_request *req,
struct ulptx_sgl *ulptx,
struct hash_wr_param *param);
static inline int chcr_hash_dma_map(struct device *dev,
struct ahash_request *req);
static inline void chcr_hash_dma_unmap(struct device *dev,
struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */
@@ -308,10 +308,8 @@ unmap_cache:
ctx->base.cache_sz = 0;
}
free_cache:
if (ctx->base.cache) {
kfree(ctx->base.cache);
ctx->base.cache = NULL;
}
kfree(ctx->base.cache);
ctx->base.cache = NULL;

unlock:
spin_unlock_bh(&priv->ring[ring].egress_lock);
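
The free_cache hunk works because kfree() is defined to be a no-op when passed NULL, so the guarding if adds nothing; resetting the pointer afterwards is what actually matters. The general shape of the simplification:

    char *buf = NULL;
    /* ... buf may or may not have been allocated ... */
    kfree(buf);	/* safe even if buf was never allocated */
    buf = NULL;	/* don't leave a dangling pointer for the next pass */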
@@ -534,7 +534,6 @@ static void release_ixp_crypto(struct device *dev)
NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
crypt_virt, crypt_phys);
}
return;
}

static void reset_sa_dir(struct ix_sa_dir *dir)
@@ -34,10 +34,6 @@
/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the mv_cesa driver)");

struct mv_cesa_dev *cesa_dev;

struct crypto_async_request *
@@ -76,8 +72,6 @@ static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)

ctx = crypto_tfm_ctx(req->tfm);
ctx->ops->step(req);

return;
}

static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
@@ -183,8 +177,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
spin_lock_bh(&engine->lock);
ret = crypto_enqueue_request(&engine->queue, req);
if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
(ret == -EINPROGRESS ||
(ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
(ret == -EINPROGRESS || ret == -EBUSY))
mv_cesa_tdma_chain(engine, creq);
spin_unlock_bh(&engine->lock);
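
The mv_cesa_queue_req() hunk leans on the reworked crypto_enqueue_request() semantics: the function now only returns -EBUSY when the request really was parked on the backlog, which itself requires the submitter to have set CRYPTO_TFM_REQ_MAY_BACKLOG, so the driver no longer needs to re-check the flag. A hedged summary as a hypothetical helper:

    /*
     * Post-rework return values of crypto_enqueue_request():
     *   -EINPROGRESS  request queued for processing
     *   -EBUSY        queue full, request parked on the backlog
     *                 (only when CRYPTO_TFM_REQ_MAY_BACKLOG was set)
     *   -ENOSPC       queue full, request rejected outright
     */
    static bool my_request_was_accepted(int ret)
    {
    	return ret == -EINPROGRESS || ret == -EBUSY;
    }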
@@ -202,7 +195,7 @@ static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
int i, j;

for (i = 0; i < cesa->caps->ncipher_algs; i++) {
ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
if (ret)
goto err_unregister_crypto;
}
@@ -222,7 +215,7 @@ err_unregister_ahash:

err_unregister_crypto:
for (j = 0; j < i; j++)
crypto_unregister_alg(cesa->caps->cipher_algs[j]);
crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

return ret;
}
@@ -235,10 +228,10 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

for (i = 0; i < cesa->caps->ncipher_algs; i++)
crypto_unregister_alg(cesa->caps->cipher_algs[i]);
crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}

static struct crypto_alg *orion_cipher_algs[] = {
static struct skcipher_alg *orion_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
@@ -254,7 +247,7 @@ static struct ahash_alg *orion_ahash_algs[] = {
&mv_ahmac_sha1_alg,
};

static struct crypto_alg *armada_370_cipher_algs[] = {
static struct skcipher_alg *armada_370_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
@@ -459,9 +452,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
caps = match->data;
}

if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
return -ENOTSUPP;

cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
if (!cesa)
return -ENOMEM;
@@ -599,9 +589,16 @@ static int mv_cesa_remove(struct platform_device *pdev)
return 0;
}

static const struct platform_device_id mv_cesa_plat_id_table[] = {
{ .name = "mv_crypto" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);

static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
.remove = mv_cesa_remove,
.id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
.of_match_table = mv_cesa_of_match_table,
@@ -5,6 +5,7 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include <linux/crypto.h>
#include <linux/dmapool.h>
@@ -373,7 +374,7 @@ struct mv_cesa_engine;
struct mv_cesa_caps {
int nengines;
bool has_tdma;
struct crypto_alg **cipher_algs;
struct skcipher_alg **cipher_algs;
int ncipher_algs;
struct ahash_alg **ahash_algs;
int nahash_algs;
@@ -539,12 +540,12 @@ struct mv_cesa_sg_std_iter {
};

/**
* struct mv_cesa_ablkcipher_std_req - cipher standard request
* struct mv_cesa_skcipher_std_req - cipher standard request
* @op: operation context
* @offset: current operation offset
* @size: size of the crypto operation
*/
struct mv_cesa_ablkcipher_std_req {
struct mv_cesa_skcipher_std_req {
struct mv_cesa_op_ctx op;
unsigned int offset;
unsigned int size;
@@ -552,14 +553,14 @@ struct mv_cesa_ablkcipher_std_req {
};

/**
* struct mv_cesa_ablkcipher_req - cipher request
* struct mv_cesa_skcipher_req - cipher request
* @req: type specific request information
* @src_nents: number of entries in the src sg list
* @dst_nents: number of entries in the dest sg list
*/
struct mv_cesa_ablkcipher_req {
struct mv_cesa_skcipher_req {
struct mv_cesa_req base;
struct mv_cesa_ablkcipher_std_req std;
struct mv_cesa_skcipher_std_req std;
int src_nents;
int dst_nents;
};
@@ -764,7 +765,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
* the backlog and will be processed later. There's no need to
* clean it up.
*/
if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
if (ret == -EBUSY)
return false;

/* Request wasn't queued, we need to clean it up */
@@ -869,11 +870,11 @@ extern struct ahash_alg mv_ahmac_md5_alg;
extern struct ahash_alg mv_ahmac_sha1_alg;
extern struct ahash_alg mv_ahmac_sha256_alg;

extern struct crypto_alg mv_cesa_ecb_des_alg;
extern struct crypto_alg mv_cesa_cbc_des_alg;
extern struct crypto_alg mv_cesa_ecb_des3_ede_alg;
extern struct crypto_alg mv_cesa_cbc_des3_ede_alg;
extern struct crypto_alg mv_cesa_ecb_aes_alg;
extern struct crypto_alg mv_cesa_cbc_aes_alg;
extern struct skcipher_alg mv_cesa_ecb_des_alg;
extern struct skcipher_alg mv_cesa_cbc_des_alg;
extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg;
extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg;
extern struct skcipher_alg mv_cesa_ecb_aes_alg;
extern struct skcipher_alg mv_cesa_cbc_aes_alg;

#endif /* __MARVELL_CESA_H__ */
@@ -32,23 +32,23 @@ struct mv_cesa_aes_ctx {
struct crypto_aes_ctx aes;
};

struct mv_cesa_ablkcipher_dma_iter {
struct mv_cesa_skcipher_dma_iter {
struct mv_cesa_dma_iter base;
struct mv_cesa_sg_dma_iter src;
struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
struct ablkcipher_request *req)
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
struct skcipher_request *req)
{
mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
iter->src.op_offset = 0;
iter->dst.op_offset = 0;
@@ -57,9 +57,9 @@ mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
}

static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

if (req->dst != req->src) {
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
@@ -73,20 +73,20 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ablkcipher_dma_cleanup(req);
mv_cesa_skcipher_dma_cleanup(req);
}

static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
size_t len = min_t(size_t, req->nbytes - sreq->offset,
size_t len = min_t(size_t, req->cryptlen - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);

mv_cesa_adjust_op(engine, &sreq->op);
@@ -114,11 +114,11 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
u32 status)
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
u32 status)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
size_t len;

@@ -127,122 +127,130 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
sreq->size, sreq->offset);

sreq->offset += len;
if (sreq->offset < req->nbytes)
if (sreq->offset < req->cryptlen)
return -EINPROGRESS;

return 0;
}

static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
u32 status)
static int mv_cesa_skcipher_process(struct crypto_async_request *req,
u32 status)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_req *basereq = &creq->base;

if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
return mv_cesa_ablkcipher_std_process(ablkreq, status);
return mv_cesa_skcipher_std_process(skreq, status);

return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_dma_step(&creq->base);
else
mv_cesa_ablkcipher_std_step(ablkreq);
mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_req *basereq = &creq->base;

mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_skcipher_std_req *sreq = &creq->std;

sreq->size = 0;
sreq->offset = 0;
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
struct mv_cesa_engine *engine)
static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
struct mv_cesa_engine *engine)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
creq->base.engine = engine;

if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ablkcipher_dma_prepare(ablkreq);
mv_cesa_skcipher_dma_prepare(skreq);
else
mv_cesa_ablkcipher_std_prepare(ablkreq);
mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct skcipher_request *skreq = skcipher_request_cast(req);

mv_cesa_ablkcipher_cleanup(ablkreq);
mv_cesa_skcipher_cleanup(skreq);
}

static void
mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;

atomic_sub(ablkreq->nbytes, &engine->load);
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
atomic_sub(skreq->cryptlen, &engine->load);
ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
struct mv_cesa_req *basereq;

basereq = &creq->base;
memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
ivsize);
} else {
memcpy_fromio(ablkreq->info,
memcpy_fromio(skreq->iv,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
}
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
.step = mv_cesa_ablkcipher_step,
.process = mv_cesa_ablkcipher_process,
.cleanup = mv_cesa_ablkcipher_req_cleanup,
.complete = mv_cesa_ablkcipher_complete,
static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
.step = mv_cesa_skcipher_step,
.process = mv_cesa_skcipher_process,
.cleanup = mv_cesa_skcipher_req_cleanup,
.complete = mv_cesa_skcipher_complete,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
void *ctx = crypto_tfm_ctx(tfm);

ctx->base.ops = &mv_cesa_ablkcipher_req_ops;
memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);
static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

ctx->ops = &mv_cesa_skcipher_req_ops;

crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct mv_cesa_skcipher_req));

return 0;
}

static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int remaining;
int offset;
@@ -251,7 +259,7 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,

ret = crypto_aes_expand_key(&ctx->aes, key, len);
if (ret) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return ret;
}

@@ -264,16 +272,16 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}

static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
int ret;

if (len != DES_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}

@@ -288,14 +296,14 @@ static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);

if (len != DES3_EDE_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}

@@ -304,14 +312,14 @@ static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
return 0;
}

static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct mv_cesa_req *basereq = &creq->base;
struct mv_cesa_ablkcipher_dma_iter iter;
struct mv_cesa_skcipher_dma_iter iter;
bool skip_ctx = false;
int ret;
unsigned int ivsize;
@@ -339,7 +347,7 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
}

mv_cesa_tdma_desc_iter_init(&basereq->chain);
mv_cesa_ablkcipher_req_iter_init(&iter, req);
mv_cesa_skcipher_req_iter_init(&iter, req);

do {
struct mv_cesa_op_ctx *op;
@@ -370,10 +378,10 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
if (ret)
goto err_free_tdma;

} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));
} while (mv_cesa_skcipher_req_iter_next_op(&iter));

/* Add output data for IV */
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
CESA_SA_DATA_SRAM_OFFSET,
CESA_TDMA_SRC_IN_SRAM, flags);
@@ -399,11 +407,11 @@ err_unmap_src:
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_req *basereq = &creq->base;

sreq->op = *op_templ;
@@ -414,23 +422,23 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
return 0;
}

static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned int blksize = crypto_skcipher_blocksize(tfm);
int ret;

if (!IS_ALIGNED(req->nbytes, blksize))
if (!IS_ALIGNED(req->cryptlen, blksize))
return -EINVAL;

creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (creq->dst_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of dst SG");
return creq->dst_nents;
@@ -440,36 +448,36 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_OP_MSK);

if (cesa_dev->caps->has_tdma)
ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
else
ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);
ret = mv_cesa_skcipher_std_req_init(req, tmpl);

return ret;
}

static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
int ret;
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;

ret = mv_cesa_ablkcipher_req_init(req, tmpl);
ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;

engine = mv_cesa_select_engine(req->nbytes);
mv_cesa_ablkcipher_prepare(&req->base, engine);
engine = mv_cesa_select_engine(req->cryptlen);
mv_cesa_skcipher_prepare(&req->base, engine);

ret = mv_cesa_queue_req(&req->base, &creq->base);

if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ablkcipher_cleanup(req);
mv_cesa_skcipher_cleanup(req);

return ret;
}

static int mv_cesa_des_op(struct ablkcipher_request *req,
static int mv_cesa_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -479,10 +487,10 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,

memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

return mv_cesa_ablkcipher_queue_req(req, tmpl);
return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -493,7 +501,7 @@ static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -504,41 +512,38 @@ static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
return mv_cesa_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des_alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "mv-ecb-des",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_ablkcipher_cra_init,
.cra_u = {
.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = mv_cesa_des_setkey,
.encrypt = mv_cesa_ecb_des_encrypt,
.decrypt = mv_cesa_ecb_des_decrypt,
},
struct skcipher_alg mv_cesa_ecb_des_alg = {
.setkey = mv_cesa_des_setkey,
.encrypt = mv_cesa_ecb_des_encrypt,
.decrypt = mv_cesa_ecb_des_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.base = {
.cra_name = "ecb(des)",
.cra_driver_name = "mv-ecb-des",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};

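With the conversion, the algorithm above is published as a struct skcipher_alg and is only reachable through the skcipher API. A minimal allocation sketch (the helper name is hypothetical; request setup and completion handling are elided):

    #include <crypto/skcipher.h>

    static int my_alloc_des_ecb(const u8 *key)
    {
    	struct crypto_skcipher *tfm;
    	int err;

    	/* resolves to the highest-priority ecb(des) provider, e.g. mv-ecb-des */
    	tfm = crypto_alloc_skcipher("ecb(des)", 0, 0);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	err = crypto_skcipher_setkey(tfm, key, DES_KEY_SIZE);

    	/* ... build and submit a skcipher_request as usual ... */

    	crypto_free_skcipher(tfm);
    	return err;
    }
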
static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
static int mv_cesa_cbc_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);

memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);
memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);

return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -547,7 +552,7 @@ static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -556,31 +561,28 @@ static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "mv-cbc-des",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_ablkcipher_cra_init,
.cra_u = {
.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = mv_cesa_des_setkey,
.encrypt = mv_cesa_cbc_des_encrypt,
.decrypt = mv_cesa_cbc_des_decrypt,
},
struct skcipher_alg mv_cesa_cbc_des_alg = {
.setkey = mv_cesa_des_setkey,
.encrypt = mv_cesa_cbc_des_encrypt,
.decrypt = mv_cesa_cbc_des_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.base = {
.cra_name = "cbc(des)",
.cra_driver_name = "mv-cbc-des",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};

static int mv_cesa_des3_op(struct ablkcipher_request *req,
static int mv_cesa_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -590,10 +592,10 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,

memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

return mv_cesa_ablkcipher_queue_req(req, tmpl);
return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -605,7 +607,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -617,39 +619,36 @@ static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
return mv_cesa_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "mv-ecb-des3-ede",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_ablkcipher_cra_init,
.cra_u = {
.ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = mv_cesa_des3_ede_setkey,
.encrypt = mv_cesa_ecb_des3_ede_encrypt,
.decrypt = mv_cesa_ecb_des3_ede_decrypt,
},
struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
.setkey = mv_cesa_des3_ede_setkey,
.encrypt = mv_cesa_ecb_des3_ede_encrypt,
.decrypt = mv_cesa_ecb_des3_ede_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "mv-ecb-des3-ede",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};

static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);
memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -661,7 +660,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -673,31 +672,28 @@ static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "mv-cbc-des3-ede",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_ablkcipher_cra_init,
.cra_u = {
.ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = mv_cesa_des3_ede_setkey,
.encrypt = mv_cesa_cbc_des3_ede_encrypt,
.decrypt = mv_cesa_cbc_des3_ede_decrypt,
},
struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
.setkey = mv_cesa_des3_ede_setkey,
.encrypt = mv_cesa_cbc_des3_ede_encrypt,
.decrypt = mv_cesa_cbc_des3_ede_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "mv-cbc-des3-ede",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_skcipher_cra_init,
.cra_exit = mv_cesa_skcipher_cra_exit,
},
};

static int mv_cesa_aes_op(struct ablkcipher_request *req,
static int mv_cesa_aes_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -724,10 +720,10 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_CRYPTM_MSK |
CESA_SA_DESC_CFG_AES_LEN_MSK);

return mv_cesa_ablkcipher_queue_req(req, tmpl);
return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -738,7 +734,7 @@ static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;

@@ -749,40 +745,37 @@ static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "mv-ecb-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mv_cesa_ablkcipher_cra_init,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = mv_cesa_aes_setkey,
.encrypt = mv_cesa_ecb_aes_encrypt,
.decrypt = mv_cesa_ecb_aes_decrypt,
},
struct skcipher_alg mv_cesa_ecb_aes_alg = {
.setkey = mv_cesa_aes_setkey,
.encrypt = mv_cesa_ecb_aes_encrypt,
.decrypt = mv_cesa_ecb_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.base = {
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "mv-ecb-aes",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = mv_cesa_skcipher_cra_init,
|
||||
.cra_exit = mv_cesa_skcipher_cra_exit,
|
||||
},
|
||||
};
|
||||
|
||||
static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
|
||||
static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
|
||||
struct mv_cesa_op_ctx *tmpl)
|
||||
{
|
||||
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
|
||||
CESA_SA_DESC_CFG_CRYPTCM_MSK);
|
||||
memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);
|
||||
memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
|
||||
|
||||
return mv_cesa_aes_op(req, tmpl);
|
||||
}
|
||||
|
||||
static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
|
||||
static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct mv_cesa_op_ctx tmpl;
|
||||
|
||||
@ -791,7 +784,7 @@ static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
|
||||
return mv_cesa_cbc_aes_op(req, &tmpl);
|
||||
}
|
||||
|
||||
static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
|
||||
static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct mv_cesa_op_ctx tmpl;
|
||||
|
||||
@ -800,26 +793,23 @@ static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
|
||||
return mv_cesa_cbc_aes_op(req, &tmpl);
|
||||
}
|
||||
|
||||
struct crypto_alg mv_cesa_cbc_aes_alg = {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "mv-cbc-aes",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = mv_cesa_ablkcipher_cra_init,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = mv_cesa_aes_setkey,
|
||||
.encrypt = mv_cesa_cbc_aes_encrypt,
|
||||
.decrypt = mv_cesa_cbc_aes_decrypt,
|
||||
},
|
||||
struct skcipher_alg mv_cesa_cbc_aes_alg = {
|
||||
.setkey = mv_cesa_aes_setkey,
|
||||
.encrypt = mv_cesa_cbc_aes_encrypt,
|
||||
.decrypt = mv_cesa_cbc_aes_decrypt,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.base = {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "mv-cbc-aes",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = mv_cesa_skcipher_cra_init,
|
||||
.cra_exit = mv_cesa_skcipher_cra_exit,
|
||||
},
|
||||
};
|
||||
|
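The whole hunk above is one mechanical conversion: each handler takes a struct skcipher_request instead of an ablkcipher_request, the IV moves from req->info to req->iv, and the algorithm descriptor becomes a struct skcipher_alg whose cipher-specific fields sit at the top level while the generic ones move into .base (which also gains a .cra_exit). As a point of reference only, a minimal registration under the new API looks roughly like the sketch below; the demo_* names and stubbed handlers are hypothetical, not this driver's code::

    #include <linux/module.h>
    #include <crypto/internal/skcipher.h>
    #include <crypto/des.h>

    /* Hypothetical driver stubs; a real driver would queue work to hardware. */
    static int demo_ecb_des3_encrypt(struct skcipher_request *req)
    {
            /* req->iv replaces the old ablkcipher req->info for IV access. */
            return -EOPNOTSUPP; /* stub */
    }

    static int demo_ecb_des3_decrypt(struct skcipher_request *req)
    {
            return -EOPNOTSUPP; /* stub */
    }

    static struct skcipher_alg demo_alg = {
            .encrypt     = demo_ecb_des3_encrypt,
            .decrypt     = demo_ecb_des3_decrypt,
            .min_keysize = DES3_EDE_KEY_SIZE,
            .max_keysize = DES3_EDE_KEY_SIZE,
            .base = {
                    .cra_name        = "ecb(des3_ede)",
                    .cra_driver_name = "demo-ecb-des3-ede",
                    .cra_priority    = 100,
                    .cra_blocksize   = DES3_EDE_BLOCK_SIZE,
                    .cra_module      = THIS_MODULE,
            },
    };

    static int __init demo_init(void)
    {
            /* One call registers the algorithm; no .cra_type/.cra_u glue. */
            return crypto_register_skcipher(&demo_alg);
    }

    static void __exit demo_exit(void)
    {
            crypto_unregister_skcipher(&demo_alg);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");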
@@ -304,10 +304,7 @@ int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
 	struct mv_cesa_tdma_desc *tdma;
 
 	tdma = mv_cesa_dma_add_desc(chain, flags);
-	if (IS_ERR(tdma))
-		return PTR_ERR(tdma);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(tdma);
 }
 
 int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
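PTR_ERR_OR_ZERO() expresses the removed four lines in one expression. Its definition in include/linux/err.h is essentially::

    static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);
            else
                    return 0;
    }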
@@ -13,6 +13,7 @@
  */
 
 #include <crypto/aes.h>
+#include <crypto/gcm.h>
 #include "mtk-platform.h"
 
 #define AES_QUEUE_SIZE 512
@@ -137,11 +138,6 @@ struct mtk_aes_gcm_ctx {
 	struct crypto_skcipher *ctr;
 };
 
-struct mtk_aes_gcm_setkey_result {
-	int err;
-	struct completion completion;
-};
-
 struct mtk_aes_drv {
 	struct list_head dev_list;
 	/* Device list lock */
@@ -928,25 +924,19 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
 {
 	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
 	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
 
+	/* Empty messages are not supported yet */
+	if (!gctx->textlen && !req->assoclen)
+		return -EINVAL;
+
 	rctx->mode = AES_FLAGS_GCM | mode;
 
 	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
 				    &req->base);
 }
 
-static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
-	struct mtk_aes_gcm_setkey_result *result = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	result->err = err;
-	complete(&result->completion);
-}
-
 /*
  * Because of the hardware limitation, we need to pre-calculate key(H)
  * for the GHASH operation. The result of the encryption operation
@@ -962,7 +952,7 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 		u32 hash[4];
 		u8 iv[8];
 
-		struct mtk_aes_gcm_setkey_result result;
+		struct crypto_wait wait;
 
 		struct scatterlist sg[1];
 		struct skcipher_request req;
@@ -1002,22 +992,17 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	if (!data)
 		return -ENOMEM;
 
-	init_completion(&data->result.completion);
+	crypto_init_wait(&data->wait);
 	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
 	skcipher_request_set_tfm(&data->req, ctr);
 	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
 				      CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      mtk_gcm_setkey_done, &data->result);
+				      crypto_req_done, &data->wait);
 	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
 				   AES_BLOCK_SIZE, data->iv);
 
-	err = crypto_skcipher_encrypt(&data->req);
-	if (err == -EINPROGRESS || err == -EBUSY) {
-		err = wait_for_completion_interruptible(
-			&data->result.completion);
-		if (!err)
-			err = data->result.err;
-	}
+	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
 	if (err)
 		goto out;
 
@@ -1098,7 +1083,7 @@ static struct aead_alg aes_gcm_alg = {
 	.decrypt	= mtk_aes_gcm_decrypt,
 	.init		= mtk_aes_gcm_init,
 	.exit		= mtk_aes_gcm_exit,
-	.ivsize		= 12,
+	.ivsize		= GCM_AES_IV_SIZE,
 	.maxauthsize	= AES_BLOCK_SIZE,
 
 	.base = {
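This is the same conversion the 4.15 API work applies across the tree: the private completion-plus-error struct and its hand-rolled callback give way to crypto_wait, crypto_req_done() and crypto_wait_req(), which also absorb the revised -EBUSY/backlog semantics. A condensed sketch of the resulting pattern for a one-shot synchronous request; the helper name is hypothetical, not this driver's code::

    #include <linux/crypto.h>
    #include <crypto/skcipher.h>

    /* Hypothetical helper: run one skcipher request synchronously. */
    static int demo_encrypt_sync(struct skcipher_request *req)
    {
            DECLARE_CRYPTO_WAIT(wait);  /* replaces the private completion */

            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                               CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);
            /*
             * crypto_wait_req() folds the old -EINPROGRESS/-EBUSY dance
             * into one call: it sleeps until the callback fires and
             * returns the final status (0 or a negative errno).
             */
            return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    }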
(diff for one file suppressed because it is too large)
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MV_CRYPTO_H__
-#define __MV_CRYPTO_H__
-
-#define DIGEST_INITIAL_VAL_A	0xdd00
-#define DIGEST_INITIAL_VAL_B	0xdd04
-#define DIGEST_INITIAL_VAL_C	0xdd08
-#define DIGEST_INITIAL_VAL_D	0xdd0c
-#define DIGEST_INITIAL_VAL_E	0xdd10
-#define DES_CMD_REG		0xdd58
-
-#define SEC_ACCEL_CMD		0xde00
-#define SEC_CMD_EN_SEC_ACCL0	(1 << 0)
-#define SEC_CMD_EN_SEC_ACCL1	(1 << 1)
-#define SEC_CMD_DISABLE_SEC	(1 << 2)
-
-#define SEC_ACCEL_DESC_P0	0xde04
-#define SEC_DESC_P0_PTR(x)	(x)
-
-#define SEC_ACCEL_DESC_P1	0xde14
-#define SEC_DESC_P1_PTR(x)	(x)
-
-#define SEC_ACCEL_CFG		0xde08
-#define SEC_CFG_STOP_DIG_ERR	(1 << 0)
-#define SEC_CFG_CH0_W_IDMA	(1 << 7)
-#define SEC_CFG_CH1_W_IDMA	(1 << 8)
-#define SEC_CFG_ACT_CH0_IDMA	(1 << 9)
-#define SEC_CFG_ACT_CH1_IDMA	(1 << 10)
-
-#define SEC_ACCEL_STATUS	0xde0c
-#define SEC_ST_ACT_0		(1 << 0)
-#define SEC_ST_ACT_1		(1 << 1)
-
-/*
- * FPGA_INT_STATUS looks like a FPGA leftover and is documented only in Errata
- * 4.12. It looks like that it was part of an IRQ-controller in FPGA and
- * someone forgot to remove it while switching to the core and moving to
- * SEC_ACCEL_INT_STATUS.
- */
-#define FPGA_INT_STATUS		0xdd68
-#define SEC_ACCEL_INT_STATUS	0xde20
-#define SEC_INT_AUTH_DONE	(1 << 0)
-#define SEC_INT_DES_E_DONE	(1 << 1)
-#define SEC_INT_AES_E_DONE	(1 << 2)
-#define SEC_INT_AES_D_DONE	(1 << 3)
-#define SEC_INT_ENC_DONE	(1 << 4)
-#define SEC_INT_ACCEL0_DONE	(1 << 5)
-#define SEC_INT_ACCEL1_DONE	(1 << 6)
-#define SEC_INT_ACC0_IDMA_DONE	(1 << 7)
-#define SEC_INT_ACC1_IDMA_DONE	(1 << 8)
-
-#define SEC_ACCEL_INT_MASK	0xde24
-
-#define AES_KEY_LEN		(8 * 4)
-
-struct sec_accel_config {
-
-	u32 config;
-#define CFG_OP_MAC_ONLY		0
-#define CFG_OP_CRYPT_ONLY	1
-#define CFG_OP_MAC_CRYPT	2
-#define CFG_OP_CRYPT_MAC	3
-#define CFG_MACM_MD5		(4 << 4)
-#define CFG_MACM_SHA1		(5 << 4)
-#define CFG_MACM_HMAC_MD5	(6 << 4)
-#define CFG_MACM_HMAC_SHA1	(7 << 4)
-#define CFG_ENCM_DES		(1 << 8)
-#define CFG_ENCM_3DES		(2 << 8)
-#define CFG_ENCM_AES		(3 << 8)
-#define CFG_DIR_ENC		(0 << 12)
-#define CFG_DIR_DEC		(1 << 12)
-#define CFG_ENC_MODE_ECB	(0 << 16)
-#define CFG_ENC_MODE_CBC	(1 << 16)
-#define CFG_3DES_EEE		(0 << 20)
-#define CFG_3DES_EDE		(1 << 20)
-#define CFG_AES_LEN_128		(0 << 24)
-#define CFG_AES_LEN_192		(1 << 24)
-#define CFG_AES_LEN_256		(2 << 24)
-#define CFG_NOT_FRAG		(0 << 30)
-#define CFG_FIRST_FRAG		(1 << 30)
-#define CFG_LAST_FRAG		(2 << 30)
-#define CFG_MID_FRAG		(3 << 30)
-
-	u32 enc_p;
-#define ENC_P_SRC(x)		(x)
-#define ENC_P_DST(x)		((x) << 16)
-
-	u32 enc_len;
-#define ENC_LEN(x)		(x)
-
-	u32 enc_key_p;
-#define ENC_KEY_P(x)		(x)
-
-	u32 enc_iv;
-#define ENC_IV_POINT(x)		((x) << 0)
-#define ENC_IV_BUF_POINT(x)	((x) << 16)
-
-	u32 mac_src_p;
-#define MAC_SRC_DATA_P(x)	(x)
-#define MAC_SRC_TOTAL_LEN(x)	((x) << 16)
-
-	u32 mac_digest;
-#define MAC_DIGEST_P(x)	(x)
-#define MAC_FRAG_LEN(x)	((x) << 16)
-	u32 mac_iv;
-#define MAC_INNER_IV_P(x)	(x)
-#define MAC_OUTER_IV_P(x)	((x) << 16)
-}__attribute__ ((packed));
-/*
- * /-----------\ 0
- * | ACCEL CFG |	4 * 8
- * |-----------| 0x20
- * | CRYPT KEY |	8 * 4
- * |-----------| 0x40
- * |  IV   IN  |	4 * 4
- * |-----------| 0x40 (inplace)
- * |  IV BUF   |	4 * 4
- * |-----------| 0x80
- * |  DATA IN  |	16 * x (max ->max_req_size)
- * |-----------| 0x80 (inplace operation)
- * |  DATA OUT |	16 * x (max ->max_req_size)
- * \-----------/ SRAM size
- */
-
-/* Hashing memory map:
- * /-----------\ 0
- * | ACCEL CFG |	4 * 8
- * |-----------| 0x20
- * | Inner IV  |	5 * 4
- * |-----------| 0x34
- * | Outer IV  |	5 * 4
- * |-----------| 0x48
- * | Output BUF|	5 * 4
- * |-----------| 0x80
- * |  DATA IN  |	64 * x (max ->max_req_size)
- * \-----------/ SRAM size
- */
-#define SRAM_CONFIG		0x00
-#define SRAM_DATA_KEY_P		0x20
-#define SRAM_DATA_IV		0x40
-#define SRAM_DATA_IV_BUF	0x40
-#define SRAM_DATA_IN_START	0x80
-#define SRAM_DATA_OUT_START	0x80
-
-#define SRAM_HMAC_IV_IN		0x20
-#define SRAM_HMAC_IV_OUT	0x34
-#define SRAM_DIGEST_BUF		0x48
-
-#define SRAM_CFG_SPACE		0x80
-
-#endif
@@ -1962,10 +1962,8 @@ static struct n2_crypto *alloc_n2cp(void)
 
 static void free_n2cp(struct n2_crypto *np)
 {
-	if (np->cwq_info.ino_table) {
-		kfree(np->cwq_info.ino_table);
-		np->cwq_info.ino_table = NULL;
-	}
+	kfree(np->cwq_info.ino_table);
+	np->cwq_info.ino_table = NULL;
 
 	kfree(np);
 }
@@ -2079,10 +2077,8 @@ static struct n2_mau *alloc_ncp(void)
 
 static void free_ncp(struct n2_mau *mp)
 {
-	if (mp->mau_info.ino_table) {
-		kfree(mp->mau_info.ino_table);
-		mp->mau_info.ino_table = NULL;
-	}
+	kfree(mp->mau_info.ino_table);
+	mp->mau_info.ino_table = NULL;
 
 	kfree(mp);
 }
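Both hunks lean on kfree() being defined as a no-op for a NULL pointer, so the surrounding if is dead weight. The guard-free shape, as a generic sketch with a hypothetical context struct::

    #include <linux/slab.h>

    struct demo_ctx {
            int *table;
    };

    static void demo_free_tables(struct demo_ctx *ctx)
    {
            /* kfree(NULL) is a documented no-op: no "if (ptr)" guard needed */
            kfree(ctx->table);
            ctx->table = NULL;
    }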
@@ -1082,7 +1082,7 @@ static int nx842_remove(struct vio_dev *viodev)
 	return 0;
 }
 
-static struct vio_device_id nx842_vio_driver_ids[] = {
+static const struct vio_device_id nx842_vio_driver_ids[] = {
 	{"ibm,compression-v1", "ibm,compression"},
 	{"", ""},
 };
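Device-ID tables are only ever read by the bus core, so constifying them moves the data into .rodata; the same change recurs below for nx, padlock-aes and padlock-sha. A minimal sketch of the idiom (hypothetical table name)::

    #include <linux/module.h>
    #include <linux/mod_devicetable.h>

    /* const lets the linker place the table in .rodata; the VIO bus
     * core only reads it through a const pointer anyway. */
    static const struct vio_device_id demo_vio_ids[] = {
            { "ibm,compression-v1", "ibm,compression" },
            { "", "" },
    };
    MODULE_DEVICE_TABLE(vio, demo_vio_ids);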
@@ -22,6 +22,7 @@
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
+#include <crypto/gcm.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -433,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	char *iv = rctx->iv;
 
-	memcpy(iv, req->iv, 12);
+	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
 
 	return gcm_aes_nx_crypt(req, 1, req->assoclen);
 }
@@ -443,7 +444,7 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	char *iv = rctx->iv;
 
-	memcpy(iv, req->iv, 12);
+	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
 
 	return gcm_aes_nx_crypt(req, 0, req->assoclen);
 }
@@ -498,7 +499,7 @@ struct aead_alg nx_gcm_aes_alg = {
 	},
 	.init = nx_crypto_ctx_aes_gcm_init,
 	.exit = nx_crypto_ctx_aead_exit,
-	.ivsize = 12,
+	.ivsize = GCM_AES_IV_SIZE,
 	.maxauthsize = AES_BLOCK_SIZE,
 	.setkey = gcm_aes_nx_set_key,
 	.encrypt = gcm_aes_nx_encrypt,
@@ -516,7 +517,7 @@ struct aead_alg nx_gcm4106_aes_alg = {
 	},
 	.init = nx_crypto_ctx_aes_gcm_init,
 	.exit = nx_crypto_ctx_aead_exit,
-	.ivsize = 8,
+	.ivsize = GCM_RFC4106_IV_SIZE,
 	.maxauthsize = AES_BLOCK_SIZE,
 	.setkey = gcm4106_aes_nx_set_key,
 	.setauthsize = gcm4106_aes_nx_setauthsize,
@@ -833,7 +833,7 @@ static void __exit nx_fini(void)
 	vio_unregister_driver(&nx_driver.viodriver);
 }
 
-static struct vio_device_id nx_crypto_driver_ids[] = {
+static const struct vio_device_id nx_crypto_driver_ids[] = {
 	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
 	{ "", "" }
 };
@@ -18,6 +18,7 @@
 #include <linux/omap-dma.h>
 #include <linux/interrupt.h>
 #include <crypto/aes.h>
+#include <crypto/gcm.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/skcipher.h>
 #include <crypto/internal/aead.h>
@@ -186,7 +187,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
 	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
 	if (!sk_req) {
 		pr_err("skcipher: Failed to allocate request\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	init_completion(&result.completion);
@@ -214,7 +215,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
 		}
 		/* fall through */
 	default:
-		pr_err("Encryption of IV failed for GCM mode");
+		pr_err("Encryption of IV failed for GCM mode\n");
 		break;
 	}
 
@@ -311,7 +312,7 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
 	int err, assoclen;
 
 	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
-	memcpy(rctx->iv + 12, &counter, 4);
+	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);
 
 	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
 	if (err)
@@ -339,7 +340,7 @@ int omap_aes_gcm_encrypt(struct aead_request *req)
 {
 	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
 
-	memcpy(rctx->iv, req->iv, 12);
+	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
 	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
 }
 
@@ -347,7 +348,7 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
 {
 	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
 
-	memcpy(rctx->iv, req->iv, 12);
+	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
 	return omap_aes_gcm_crypt(req, FLAGS_GCM);
 }
 
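<crypto/gcm.h>, new in this series, gives the GCM magic numbers names: GCM_AES_IV_SIZE is 12 (the 96-bit nonce recommended by NIST SP 800-38D) and GCM_RFC4106_IV_SIZE is 8 (the explicit per-packet IV of RFC 4106); the same rename runs through the mediatek, nx and omap hunks. The omap code above appends a 32-bit counter right after the nonce to form the initial counter block J0 = IV || 0^31 || 1; a standalone sketch of that layout, with a hypothetical helper name::

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>
    #include <crypto/gcm.h>  /* GCM_AES_IV_SIZE == 12, GCM_RFC4106_IV_SIZE == 8 */

    /* Build J0 for a 96-bit GCM nonce into a 16-byte output buffer. */
    static void demo_build_j0(u8 *j0, const u8 *iv)
    {
            __be32 counter = cpu_to_be32(1);

            memcpy(j0, iv, GCM_AES_IV_SIZE);                    /* 96-bit IV  */
            memcpy(j0 + GCM_AES_IV_SIZE, &counter, sizeof(counter)); /* || 0^31 1 */
    }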
@@ -35,6 +35,7 @@
 #include <linux/interrupt.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/aes.h>
+#include <crypto/gcm.h>
 #include <crypto/engine.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/aead.h>
@@ -767,7 +768,7 @@ static struct aead_alg algs_aead_gcm[] = {
 	},
 	.init		= omap_aes_gcm_cra_init,
 	.exit		= omap_aes_gcm_cra_exit,
-	.ivsize		= 12,
+	.ivsize		= GCM_AES_IV_SIZE,
 	.maxauthsize	= AES_BLOCK_SIZE,
 	.setkey		= omap_aes_gcm_setkey,
 	.encrypt	= omap_aes_gcm_encrypt,
@@ -788,7 +789,7 @@ static struct aead_alg algs_aead_gcm[] = {
 	.init		= omap_aes_gcm_cra_init,
 	.exit		= omap_aes_gcm_cra_exit,
 	.maxauthsize	= AES_BLOCK_SIZE,
-	.ivsize		= 8,
+	.ivsize		= GCM_RFC4106_IV_SIZE,
 	.setkey		= omap_aes_4106gcm_setkey,
 	.encrypt	= omap_aes_4106gcm_encrypt,
 	.decrypt	= omap_aes_4106gcm_decrypt,
@@ -974,11 +975,10 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
 		struct device *dev, struct resource *res)
 {
 	struct device_node *node = dev->of_node;
-	const struct of_device_id *match;
 	int err = 0;
 
-	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
-	if (!match) {
+	dd->pdata = of_device_get_match_data(dev);
+	if (!dd->pdata) {
 		dev_err(dev, "no compatible OF match\n");
 		err = -EINVAL;
 		goto err;
@@ -991,8 +991,6 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
 		goto err;
 	}
 
-	dd->pdata = match->data;
-
 err:
 	return err;
 }
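of_device_get_match_data() collapses the of_match_device()-then-match->data two-step into one call, which is why the local of_device_id pointer disappears here and in the omap-des and omap-sham hunks below. The shape of the new lookup, as a sketch with hypothetical driver types::

    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct demo_pdata {
            u32 flags;
    };

    static int demo_probe(struct platform_device *pdev)
    {
            /* Returns the .data of the matching of_device_id, or NULL. */
            const struct demo_pdata *pdata =
                    of_device_get_match_data(&pdev->dev);

            if (!pdata)
                    return -EINVAL;

            return 0;
    }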
@@ -928,16 +928,13 @@ MODULE_DEVICE_TABLE(of, omap_des_of_match);
 static int omap_des_get_of(struct omap_des_dev *dd,
 		struct platform_device *pdev)
 {
-	const struct of_device_id *match;
-
-	match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev);
-	if (!match) {
+
+	dd->pdata = of_device_get_match_data(&pdev->dev);
+	if (!dd->pdata) {
 		dev_err(&pdev->dev, "no compatible OF match\n");
 		return -EINVAL;
 	}
 
-	dd->pdata = match->data;
-
 	return 0;
 }
 #else
@@ -1944,11 +1944,10 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
 		struct device *dev, struct resource *res)
 {
 	struct device_node *node = dev->of_node;
-	const struct of_device_id *match;
 	int err = 0;
 
-	match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
-	if (!match) {
+	dd->pdata = of_device_get_match_data(dev);
+	if (!dd->pdata) {
 		dev_err(dev, "no compatible OF match\n");
 		err = -EINVAL;
 		goto err;
@@ -1968,8 +1967,6 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
 		goto err;
 	}
 
-	dd->pdata = match->data;
-
 err:
 	return err;
 }
@@ -482,7 +482,7 @@ static struct crypto_alg cbc_aes_alg = {
 	}
 };
 
-static struct x86_cpu_id padlock_cpu_id[] = {
+static const struct x86_cpu_id padlock_cpu_id[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
 	{}
 };
@@ -509,7 +509,7 @@ static struct shash_alg sha256_alg_nano = {
 	}
 };
 
-static struct x86_cpu_id padlock_sha_ids[] = {
+static const struct x86_cpu_id padlock_sha_ids[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_PHE),
 	{}
 };
@@ -228,11 +228,8 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
 		list_add_tail(&map->list, &vfs_table);
 	} else if (accel_dev->is_vf && pf) {
 		/* VF on host */
-		struct adf_accel_vf_info *vf_info;
 		struct vf_id_map *map;
 
-		vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
-
 		map = adf_find_vf(adf_get_vf_num(accel_dev));
 		if (map) {
 			struct vf_id_map *next;
@@ -443,9 +443,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
 	struct qat_crypto_instance *inst = ctx->inst;
 	struct device *dev = &GET_DEV(inst->accel_dev);
 
-	if (unlikely(!params->p || !params->g))
-		return -EINVAL;
-
 	if (qat_dh_check_params_length(params->p_size << 3))
 		return -EINVAL;
 
@@ -462,11 +459,8 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
 	}
 
 	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
-	if (!ctx->g) {
-		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
-		ctx->p = NULL;
+	if (!ctx->g)
 		return -ENOMEM;
-	}
 	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
 	       params->g_size);
 
@@ -507,18 +501,22 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
 
 	ret = qat_dh_set_params(ctx, &params);
 	if (ret < 0)
-		return ret;
+		goto err_clear_ctx;
 
 	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
 				      GFP_KERNEL);
 	if (!ctx->xa) {
-		qat_dh_clear_ctx(dev, ctx);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_clear_ctx;
 	}
 	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
 	       params.key_size);
 
 	return 0;
+
+err_clear_ctx:
+	qat_dh_clear_ctx(dev, ctx);
+	return ret;
 }
 
 static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
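The qat_dh_set_secret() rework is the standard centralized-unwind idiom: every failure path funnels through a single label so the cleanup call lives in exactly one place. Reduced to a sketch with hypothetical helpers::

    struct demo_ctx;
    int demo_alloc_params(struct demo_ctx *ctx);
    int demo_alloc_key(struct demo_ctx *ctx);
    void demo_clear_ctx(struct demo_ctx *ctx);

    static int demo_set_secret(struct demo_ctx *ctx)
    {
            int ret;

            ret = demo_alloc_params(ctx);
            if (ret < 0)
                    goto err_clear_ctx;

            ret = demo_alloc_key(ctx);
            if (ret < 0)
                    goto err_clear_ctx;

            return 0;

    err_clear_ctx:
            /* every failure path funnels through the one cleanup site */
            demo_clear_ctx(ctx);
            return ret;
    }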
@@ -567,26 +567,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
 			code_page->imp_expr_tab_offset);
 	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
 	    imp_expr_tab->entry_num) {
-		pr_err("QAT: UOF can't contain imported variable to be parsed");
+		pr_err("QAT: UOF can't contain imported variable to be parsed\n");
 		return -EINVAL;
 	}
 	neigh_reg_tab = (struct icp_qat_uof_objtable *)
 			(encap_uof_obj->beg_uof +
 			 code_page->neigh_reg_tab_offset);
 	if (neigh_reg_tab->entry_num) {
-		pr_err("QAT: UOF can't contain shared control store feature");
+		pr_err("QAT: UOF can't contain shared control store feature\n");
 		return -EINVAL;
 	}
 	if (image->numpages > 1) {
-		pr_err("QAT: UOF can't contain multiple pages");
+		pr_err("QAT: UOF can't contain multiple pages\n");
 		return -EINVAL;
 	}
 	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
-		pr_err("QAT: UOF can't use shared control store feature");
+		pr_err("QAT: UOF can't use shared control store feature\n");
 		return -EFAULT;
 	}
 	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
-		pr_err("QAT: UOF can't use reloadable feature");
+		pr_err("QAT: UOF can't use reloadable feature\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -702,7 +702,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
 		}
 	}
 	if (!mflag) {
-		pr_err("QAT: uimage uses AE not set");
+		pr_err("QAT: uimage uses AE not set\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -791,6 +791,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
 	case ICP_GPA_ABS:
 	case ICP_GPB_ABS:
 		ctx_mask = 0;
+		/* fall through */
 	case ICP_GPA_REL:
 	case ICP_GPB_REL:
 		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
@@ -800,6 +801,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
 	case ICP_SR_RD_ABS:
 	case ICP_DR_RD_ABS:
 		ctx_mask = 0;
+		/* fall through */
 	case ICP_SR_REL:
 	case ICP_DR_REL:
 	case ICP_SR_RD_REL:
@@ -809,6 +811,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
 	case ICP_SR_WR_ABS:
 	case ICP_DR_WR_ABS:
 		ctx_mask = 0;
+		/* fall through */
 	case ICP_SR_WR_REL:
 	case ICP_DR_WR_REL:
 		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
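The added /* fall through */ comments are not decoration: with gcc 7's -Wimplicit-fallthrough, a comment matching the fall-through pattern marks the missing break as intentional and silences the warning. In miniature, with hypothetical names::

    enum demo_reg { DEMO_ABS, DEMO_REL };
    int demo_init_reg(unsigned long ctx_mask);

    int demo_init(enum demo_reg reg_type, unsigned long ctx_mask)
    {
            switch (reg_type) {
            case DEMO_ABS:
                    ctx_mask = 0;
                    /* fall through */
            case DEMO_REL:
                    return demo_init_reg(ctx_mask);
            }
            return -1;
    }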