Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Fix out-of-sync IVs in self-test for IPsec AEAD algorithms

  Algorithms:
   - Use formally verified implementation of x86/curve25519

  Drivers:
   - Enhance hwrng support in caam
   - Use crypto_engine for skcipher/aead/rsa/hash in caam
   - Add Xilinx AES driver
   - Add uacce driver
   - Register zip engine to uacce in hisilicon
   - Add support for OCTEON TX CPT engine in marvell"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (162 commits)
  crypto: af_alg - bool type cosmetics
  crypto: arm[64]/poly1305 - add artifact to .gitignore files
  crypto: caam - limit single JD RNG output to maximum of 16 bytes
  crypto: caam - enable prediction resistance in HRWNG
  bus: fsl-mc: add api to retrieve mc version
  crypto: caam - invalidate entropy register during RNG initialization
  crypto: caam - check if RNG job failed
  crypto: caam - simplify RNG implementation
  crypto: caam - drop global context pointer and init_done
  crypto: caam - use struct hwrng's .init for initialization
  crypto: caam - allocate RNG instantiation descriptor with GFP_DMA
  crypto: ccree - remove duplicated include from cc_aead.c
  crypto: chelsio - remove set but not used variable 'adap'
  crypto: marvell - enable OcteonTX cpt options for build
  crypto: marvell - add the Virtual Function driver for CPT
  crypto: marvell - add support for OCTEON TX CPT engine
  crypto: marvell - create common Kconfig and Makefile for Marvell
  crypto: arm/neon - memzero_explicit aes-cbc key
  crypto: bcm - Use scnprintf() for avoiding potential buffer overflow
  crypto: atmel-i2c - Fix wakeup fail
  ...
commit 72f35423e8

39	Documentation/ABI/testing/sysfs-driver-uacce	(new file)
@ -0,0 +1,39 @@
What:		/sys/class/uacce/<dev_name>/api
Date:		Feb 2020
KernelVersion:	5.7
Contact:	linux-accelerators@lists.ozlabs.org
Description:	API of the device.
		Can be any string; it is up to userspace to parse it.
		Applications use the api attribute to match the correct
		user-space driver.

What:		/sys/class/uacce/<dev_name>/flags
Date:		Feb 2020
KernelVersion:	5.7
Contact:	linux-accelerators@lists.ozlabs.org
Description:	Attributes of the device, see the UACCE_DEV_xxx flags
		defined in uacce.h.

What:		/sys/class/uacce/<dev_name>/available_instances
Date:		Feb 2020
KernelVersion:	5.7
Contact:	linux-accelerators@lists.ozlabs.org
Description:	Number of instances still available on the device.
		Returns -ENODEV if the uacce_ops get_available_instances
		callback is not provided.

What:		/sys/class/uacce/<dev_name>/algorithms
Date:		Feb 2020
KernelVersion:	5.7
Contact:	linux-accelerators@lists.ozlabs.org
Description:	Algorithms supported by this accelerator, separated by
		newlines.
		Can be any string; it is up to userspace to parse it.

What:		/sys/class/uacce/<dev_name>/region_mmio_size
Date:		Feb 2020
KernelVersion:	5.7
Contact:	linux-accelerators@lists.ozlabs.org
Description:	Size (in bytes) of the mmio region of the queue file.

What:		/sys/class/uacce/<dev_name>/region_dus_size
Date:		Feb 2020
KernelVersion:	5.7
Contact:	linux-accelerators@lists.ozlabs.org
Description:	Size (in bytes) of the dus (device user share) region of
		the queue file.
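Taken together, the api and algorithms attributes are what a user-space driver
reads to decide whether it can drive a given accelerator. The fragment below is
only an illustration of that matching step; the device name "hisi_zip-0" and the
helper function are assumptions, not part of this ABI.

    #include <stdio.h>
    #include <string.h>

    /* Read one sysfs attribute of a uacce device (illustrative helper). */
    static int uacce_read_attr(const char *dev, const char *attr,
                               char *buf, size_t len)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/class/uacce/%s/%s", dev, attr);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (!fgets(buf, len, f))        /* algorithms may span several lines */
                    buf[0] = '\0';
            fclose(f);
            return 0;
    }

    int main(void)
    {
            char api[64], algs[256];

            /* "hisi_zip-0" is a made-up name; real code enumerates /sys/class/uacce. */
            if (uacce_read_attr("hisi_zip-0", "api", api, sizeof(api)) ||
                uacce_read_attr("hisi_zip-0", "algorithms", algs, sizeof(algs)))
                    return 1;

            /* The user driver parses both strings and decides whether it matches. */
            printf("api=%s algorithms=%s\n", api, algs);
            return 0;
    }
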
37	Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.yaml	(new file)
@ -0,0 +1,37 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/xlnx,zynqmp-aes.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Xilinx ZynqMP AES-GCM Hardware Accelerator Device Tree Bindings

maintainers:
  - Kalyani Akula <kalyani.akula@xilinx.com>
  - Michal Simek <michal.simek@xilinx.com>

description: |
  The ZynqMP AES-GCM hardened cryptographic accelerator is used to
  encrypt or decrypt the data with provided key and initialization vector.

properties:
  compatible:
    const: xlnx,zynqmp-aes

required:
  - compatible

additionalProperties: false

examples:
  - |
    firmware {
      zynqmp_firmware: zynqmp-firmware {
        compatible = "xlnx,zynqmp-firmware";
        method = "smc";
        xlnx_aes: zynqmp-aes {
          compatible = "xlnx,zynqmp-aes";
        };
      };
    };
...
176	Documentation/misc-devices/uacce.rst	(new file)
@ -0,0 +1,176 @@
.. SPDX-License-Identifier: GPL-2.0

Introduction of Uacce
---------------------

Uacce (Unified/User-space-access-intended Accelerator Framework) aims to
provide Shared Virtual Addressing (SVA) between accelerators and processes,
so that an accelerator can access any data structure of the main CPU.
This differs from conventional data sharing between a CPU and an I/O device,
which shares only the data content rather than the address space.
Because the address space is unified, the hardware and the user space of a
process can use the same virtual addresses when communicating.
Uacce treats the hardware accelerator as a heterogeneous processor: through
the IOMMU it shares the same CPU page tables and, as a result, the same
translation from va to pa.

::

     __________________________       __________________________
    |                          |     |                          |
    |  User application (CPU)  |     |   Hardware Accelerator   |
    |__________________________|     |__________________________|

                 |                                 |
                 | va                              | va
                 V                                 V
             __________                        __________
            |          |                      |          |
            |   MMU    |                      |  IOMMU   |
            |__________|                      |__________|
                 |                                 |
                 |                                 |
                 V pa                              V pa
             _______________________________________
            |                                       |
            |                Memory                 |
            |_______________________________________|


Architecture
------------

Uacce is the kernel module that takes charge of the IOMMU and of address
sharing.  The user-space drivers and libraries are called WarpDrive.

The uacce device, built around the IOMMU SVA API, can access multiple
address spaces, including ones without a PASID.

A virtual concept, the queue, is used for communication.  It provides a
FIFO-like interface and maintains a unified address space between the
application and all involved hardware.

::

     ___________________                           ________________
    |                   |   user API              |                |
    | WarpDrive library | ------------------->    |  user driver   |
    |___________________|                         |________________|
             |                                             |
             |                                             |
             | queue fd                                    |
             |                                             |
             |                                             |
             v                                             |
     ___________________        _________                  |
    |                   |      |         |                 | mmap memory
    | Other framework   |      |  uacce  |                 | r/w interface
    | crypto/nic/others |      |_________|                 |
    |___________________|           |                      |
             |                      |                      |
             | register             | register             |
             |                      |                      |
             |                      |                      |
             |              _________________   __________ |
             |             |                 | |          ||
              ------------ |  Device Driver  | |  IOMMU   ||
                           |_________________| |__________||
                                    |                      |
                                    |                      V
                                    |           ___________________
                                    |          |                   |
                                     --------- | Device(Hardware)  |
                                               |___________________|


How does it work
----------------

Uacce uses mmap and the IOMMU to do the trick.

Uacce creates a chrdev for every device registered to it.  A new queue is
created when a user application opens the chrdev, and the resulting file
descriptor is used as the user handle of the queue.
The accelerator device presents itself as an Uacce object, which is exported
as a chrdev to user space.  The user application communicates with the
hardware through ioctl (the control path) or through shared memory (the
data path).

The control path to the hardware goes via file operations, while the data
path goes via the mmap space of the queue fd.

The queue file address space:

::

   /**
    * enum uacce_qfrt: qfrt type
    * @UACCE_QFRT_MMIO: device mmio region
    * @UACCE_QFRT_DUS: device user share region
    */
   enum uacce_qfrt {
           UACCE_QFRT_MMIO = 0,
           UACCE_QFRT_DUS = 1,
   };

All regions are optional and differ from device type to device type.
Each region can be mmapped only once; otherwise -EEXIST is returned.

The device mmio region is mapped to the hardware mmio space.  It is generally
used for doorbells or other notifications to the hardware; it is not fast
enough to serve as a data channel.

The device user share region is used to share data buffers between the user
process and the device.

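To make the queue-file layout concrete, here is a minimal user-space sketch.
It assumes a made-up chrdev name and that the mmap offset selects the region
by its qfrt index times the page size; neither assumption is guaranteed by
this document, so treat it as an illustration rather than a reference.

::

   #include <fcntl.h>
   #include <stdio.h>
   #include <sys/mman.h>
   #include <unistd.h>

   int main(void)
   {
           /* "/dev/hisi_zip-0" is a hypothetical queue chrdev name. */
           int fd = open("/dev/hisi_zip-0", O_RDWR);
           size_t dus_size = 4096;   /* in practice read region_dus_size from sysfs */
           void *dus;

           if (fd < 0)
                   return 1;

           /* Assumed convention: offset = qfrt index * page size (UACCE_QFRT_DUS = 1). */
           dus = mmap(NULL, dus_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                      fd, 1 * getpagesize());
           if (dus == MAP_FAILED) {
                   close(fd);
                   return 1;
           }

           /* ... place requests in the shared buffer, kick the device via MMIO ... */

           munmap(dus, dus_size);
           close(fd);
           return 0;
   }
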
The Uacce register API
----------------------

The register API is defined in uacce.h.

::

   struct uacce_interface {
           char name[UACCE_MAX_NAME_SIZE];
           unsigned int flags;
           const struct uacce_ops *ops;
   };

According to the IOMMU capability, the uacce_interface flags can be:

::

   /**
    * UACCE Device flags:
    * UACCE_DEV_SVA: Shared Virtual Addresses
    *                Support PASID
    *                Support device page faults (PCI PRI or SMMU Stall)
    */
   #define UACCE_DEV_SVA           BIT(0)

   struct uacce_device *uacce_alloc(struct device *parent,
                                    struct uacce_interface *interface);
   int uacce_register(struct uacce_device *uacce);
   void uacce_remove(struct uacce_device *uacce);

The result of uacce_register can be:

a. ERR_PTR(-ENODEV), if the uacce module is not compiled in

b. Success, with the desired flags

c. Success, with negotiated flags, for example

   uacce_interface.flags = UACCE_DEV_SVA but uacce->flags = ~UACCE_DEV_SVA

So the user driver needs to check the return value as well as the negotiated
uacce->flags.

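As a sketch of how a device driver might wire these calls together (the ops
table contents, the names, and the error paths here are assumptions for
illustration, not taken from uacce.h):

::

   /* Illustrative only: a device driver allocating and registering a uacce device. */
   static const struct uacce_ops example_ops = {
           /* queue-handling callbacks would be filled in by the real driver */
   };

   static struct uacce_device *example_uacce_setup(struct device *parent)
   {
           struct uacce_interface interface = {
                   .name  = "example",
                   .flags = UACCE_DEV_SVA,   /* request SVA if the IOMMU supports it */
                   .ops   = &example_ops,
           };
           struct uacce_device *uacce;
           int ret;

           uacce = uacce_alloc(parent, &interface);
           if (IS_ERR(uacce))
                   return uacce;             /* e.g. ERR_PTR(-ENODEV) without uacce */

           /* Flags may have been negotiated down; check uacce->flags. */
           if (!(uacce->flags & UACCE_DEV_SVA))
                   dev_info(parent, "running without SVA\n");

           ret = uacce_register(uacce);
           if (ret) {
                   uacce_remove(uacce);
                   return ERR_PTR(ret);
           }

           return uacce;
   }
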
The user driver
---------------

The queue file mmap space needs a user driver to wrap the communication
protocol.  Uacce provides some attributes in sysfs so that the user driver
can match the right accelerator accordingly.
More details are in Documentation/ABI/testing/sysfs-driver-uacce.
17	MAINTAINERS
@ -4577,7 +4577,9 @@ S: Supported
|
||||
F: drivers/scsi/cxgbi/cxgb3i
|
||||
|
||||
CXGB4 CRYPTO DRIVER (chcr)
|
||||
M: Atul Gupta <atul.gupta@chelsio.com>
|
||||
M: Ayush Sawal <ayush.sawal@chelsio.com>
|
||||
M: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
|
||||
M: Rohit Maheshwari <rohitm@chelsio.com>
|
||||
L: linux-crypto@vger.kernel.org
|
||||
W: http://www.chelsio.com
|
||||
S: Supported
|
||||
@ -10066,6 +10068,7 @@ F: Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt
|
||||
MARVELL CRYPTO DRIVER
|
||||
M: Boris Brezillon <bbrezillon@kernel.org>
|
||||
M: Arnaud Ebalard <arno@natisbad.org>
|
||||
M: Srujana Challa <schalla@marvell.com>
|
||||
F: drivers/crypto/marvell/
|
||||
S: Maintained
|
||||
L: linux-crypto@vger.kernel.org
|
||||
@ -17139,6 +17142,18 @@ W: http://linuxtv.org
|
||||
S: Maintained
|
||||
F: drivers/media/pci/tw686x/
|
||||
|
||||
UACCE ACCELERATOR FRAMEWORK
|
||||
M: Zhangfei Gao <zhangfei.gao@linaro.org>
|
||||
M: Zhou Wang <wangzhou1@hisilicon.com>
|
||||
L: linux-accelerators@lists.ozlabs.org
|
||||
L: linux-kernel@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/ABI/testing/sysfs-driver-uacce
|
||||
F: Documentation/misc-devices/uacce.rst
|
||||
F: drivers/misc/uacce/
|
||||
F: include/linux/uacce.h
|
||||
F: include/uapi/misc/uacce/
|
||||
|
||||
UBI FILE SYSTEM (UBIFS)
|
||||
M: Richard Weinberger <richard@nod.at>
|
||||
L: linux-mtd@lists.infradead.org
|
||||
|
1	arch/arm/crypto/.gitignore	(vendored)
@ -1,3 +1,4 @@
|
||||
aesbs-core.S
|
||||
sha256-core.S
|
||||
sha512-core.S
|
||||
poly1305-core.S
|
||||
|
@ -138,6 +138,7 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
|
||||
kernel_neon_begin();
|
||||
aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
|
||||
kernel_neon_end();
|
||||
memzero_explicit(&rk, sizeof(rk));
|
||||
|
||||
return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
|
||||
}
|
||||
|
@ -8,6 +8,9 @@
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/assembler.h>
|
||||
|
||||
.arch armv8-a
|
||||
.fpu crypto-neon-fp-armv8
|
||||
|
||||
SHASH .req q0
|
||||
T1 .req q1
|
||||
XL .req q2
|
||||
@ -88,8 +91,6 @@
|
||||
T3_H .req d17
|
||||
|
||||
.text
|
||||
.arch armv8-a
|
||||
.fpu crypto-neon-fp-armv8
|
||||
|
||||
.macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4
|
||||
vmull.p64 \rd, \rn, \rm
|
||||
|
1	arch/arm64/crypto/.gitignore	(vendored)
@ -1,2 +1,3 @@
|
||||
sha256-core.S
|
||||
sha512-core.S
|
||||
poly1305-core.S
|
||||
|
@ -151,6 +151,7 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
|
||||
kernel_neon_begin();
|
||||
aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
|
||||
kernel_neon_end();
|
||||
memzero_explicit(&rk, sizeof(rk));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -91,12 +91,32 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
|
||||
return sha1_base_finish(desc, out);
|
||||
}
|
||||
|
||||
static int sha1_ce_export(struct shash_desc *desc, void *out)
|
||||
{
|
||||
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
memcpy(out, &sctx->sst, sizeof(struct sha1_state));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sha1_ce_import(struct shash_desc *desc, const void *in)
|
||||
{
|
||||
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
memcpy(&sctx->sst, in, sizeof(struct sha1_state));
|
||||
sctx->finalize = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg alg = {
|
||||
.init = sha1_base_init,
|
||||
.update = sha1_ce_update,
|
||||
.final = sha1_ce_final,
|
||||
.finup = sha1_ce_finup,
|
||||
.import = sha1_ce_import,
|
||||
.export = sha1_ce_export,
|
||||
.descsize = sizeof(struct sha1_ce_state),
|
||||
.statesize = sizeof(struct sha1_state),
|
||||
.digestsize = SHA1_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "sha1",
|
||||
|
@ -109,12 +109,32 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
|
||||
return sha256_base_finish(desc, out);
|
||||
}
|
||||
|
||||
static int sha256_ce_export(struct shash_desc *desc, void *out)
|
||||
{
|
||||
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
memcpy(out, &sctx->sst, sizeof(struct sha256_state));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sha256_ce_import(struct shash_desc *desc, const void *in)
|
||||
{
|
||||
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
memcpy(&sctx->sst, in, sizeof(struct sha256_state));
|
||||
sctx->finalize = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg algs[] = { {
|
||||
.init = sha224_base_init,
|
||||
.update = sha256_ce_update,
|
||||
.final = sha256_ce_final,
|
||||
.finup = sha256_ce_finup,
|
||||
.export = sha256_ce_export,
|
||||
.import = sha256_ce_import,
|
||||
.descsize = sizeof(struct sha256_ce_state),
|
||||
.statesize = sizeof(struct sha256_state),
|
||||
.digestsize = SHA224_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "sha224",
|
||||
@ -128,7 +148,10 @@ static struct shash_alg algs[] = { {
|
||||
.update = sha256_ce_update,
|
||||
.final = sha256_ce_final,
|
||||
.finup = sha256_ce_finup,
|
||||
.export = sha256_ce_export,
|
||||
.import = sha256_ce_import,
|
||||
.descsize = sizeof(struct sha256_ce_state),
|
||||
.statesize = sizeof(struct sha256_state),
|
||||
.digestsize = SHA256_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "sha256",
|
||||
|
(file diff suppressed because it is too large)
@ -821,8 +821,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
||||
struct af_alg_tsgl *sgl;
|
||||
struct af_alg_control con = {};
|
||||
long copied = 0;
|
||||
bool enc = 0;
|
||||
bool init = 0;
|
||||
bool enc = false;
|
||||
bool init = false;
|
||||
int err = 0;
|
||||
|
||||
if (msg->msg_controllen) {
|
||||
@ -830,13 +830,13 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
init = 1;
|
||||
init = true;
|
||||
switch (con.op) {
|
||||
case ALG_OP_ENCRYPT:
|
||||
enc = 1;
|
||||
enc = true;
|
||||
break;
|
||||
case ALG_OP_DECRYPT:
|
||||
enc = 0;
|
||||
enc = false;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -83,7 +83,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
ctx->more = 0;
|
||||
ctx->more = false;
|
||||
|
||||
while (msg_data_left(msg)) {
|
||||
int len = msg_data_left(msg);
|
||||
@ -211,7 +211,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
|
||||
}
|
||||
|
||||
if (!result || ctx->more) {
|
||||
ctx->more = 0;
|
||||
ctx->more = false;
|
||||
err = crypto_wait_req(crypto_ahash_final(&ctx->req),
|
||||
&ctx->wait);
|
||||
if (err)
|
||||
@ -436,7 +436,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
|
||||
|
||||
ctx->result = NULL;
|
||||
ctx->len = len;
|
||||
ctx->more = 0;
|
||||
ctx->more = false;
|
||||
crypto_init_wait(&ctx->wait);
|
||||
|
||||
ask->private = ctx;
|
||||
|
@ -458,7 +458,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
|
||||
inst->alg.encrypt = crypto_authenc_esn_encrypt;
|
||||
inst->alg.decrypt = crypto_authenc_esn_decrypt;
|
||||
|
||||
inst->free = crypto_authenc_esn_free,
|
||||
inst->free = crypto_authenc_esn_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err) {
|
||||
|
29	crypto/ccm.c
@ -717,7 +717,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
|
||||
struct aead_instance *inst;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct aead_alg *alg;
|
||||
const char *ccm_name;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
@ -729,19 +728,15 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
ccm_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(ccm_name))
|
||||
return PTR_ERR(ccm_name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
|
||||
spawn = aead_instance_ctx(inst);
|
||||
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
|
||||
ccm_name, 0, mask);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_spawn_aead_alg(spawn);
|
||||
|
||||
@ -749,11 +744,11 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
|
||||
|
||||
/* We only support 16-byte blocks. */
|
||||
if (crypto_aead_alg_ivsize(alg) != 16)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
/* Not a stream cipher? */
|
||||
if (alg->base.cra_blocksize != 1)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
@ -762,7 +757,7 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
|
||||
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4309(%s)", alg->base.cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
@ -786,17 +781,11 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
|
||||
inst->free = crypto_rfc4309_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_drop_alg;
|
||||
|
||||
out:
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
crypto_rfc4309_free(inst);
|
||||
}
|
||||
return err;
|
||||
|
||||
out_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
|
||||
|
@ -369,7 +369,6 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
|
||||
struct skcipherd_instance_ctx *ctx;
|
||||
struct skcipher_instance *inst;
|
||||
struct skcipher_alg *alg;
|
||||
const char *name;
|
||||
u32 type;
|
||||
u32 mask;
|
||||
int err;
|
||||
@ -379,10 +378,6 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
|
||||
|
||||
cryptd_check_internal(tb, &type, &mask);
|
||||
|
||||
name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(name))
|
||||
return PTR_ERR(name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
@ -391,14 +386,14 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
|
||||
ctx->queue = queue;
|
||||
|
||||
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
|
||||
name, type, mask);
|
||||
crypto_attr_alg_name(tb[1]), type, mask);
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_spawn_skcipher_alg(&ctx->spawn);
|
||||
err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
|
||||
if (err)
|
||||
goto out_drop_skcipher;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
|
||||
@ -421,10 +416,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
|
||||
|
||||
err = skcipher_register_instance(tmpl, inst);
|
||||
if (err) {
|
||||
out_drop_skcipher:
|
||||
crypto_drop_skcipher(&ctx->spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
err_free_inst:
|
||||
cryptd_skcipher_free(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@ -694,8 +687,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
err = ahash_register_instance(tmpl, inst);
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
crypto_drop_shash(&ctx->spawn);
|
||||
kfree(inst);
|
||||
cryptd_hash_free(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@ -833,17 +825,12 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
|
||||
struct aead_instance_ctx *ctx;
|
||||
struct aead_instance *inst;
|
||||
struct aead_alg *alg;
|
||||
const char *name;
|
||||
u32 type = 0;
|
||||
u32 mask = CRYPTO_ALG_ASYNC;
|
||||
int err;
|
||||
|
||||
cryptd_check_internal(tb, &type, &mask);
|
||||
|
||||
name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(name))
|
||||
return PTR_ERR(name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
@ -852,14 +839,14 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
|
||||
ctx->queue = queue;
|
||||
|
||||
err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
|
||||
name, type, mask);
|
||||
crypto_attr_alg_name(tb[1]), type, mask);
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
|
||||
err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
|
||||
if (err)
|
||||
goto out_drop_aead;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
|
||||
@ -879,10 +866,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err) {
|
||||
out_drop_aead:
|
||||
crypto_drop_aead(&ctx->aead_spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
err_free_inst:
|
||||
cryptd_aead_free(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
29	crypto/ctr.c
@ -260,7 +260,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
|
||||
struct skcipher_instance *inst;
|
||||
struct skcipher_alg *alg;
|
||||
struct crypto_skcipher_spawn *spawn;
|
||||
const char *cipher_name;
|
||||
u32 mask;
|
||||
|
||||
int err;
|
||||
@ -272,10 +271,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
|
||||
return -EINVAL;
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(cipher_name))
|
||||
return PTR_ERR(cipher_name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
@ -287,7 +282,7 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
|
||||
spawn = skcipher_instance_ctx(inst);
|
||||
|
||||
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
|
||||
cipher_name, 0, mask);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
@ -296,20 +291,20 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
|
||||
/* We only support 16-byte blocks. */
|
||||
err = -EINVAL;
|
||||
if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
/* Not a stream cipher? */
|
||||
if (alg->base.cra_blocksize != 1)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc3686(%s)", alg->base.cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
@ -336,17 +331,11 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
|
||||
inst->free = crypto_rfc3686_free;
|
||||
|
||||
err = skcipher_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto err_drop_spawn;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
err_drop_spawn:
|
||||
crypto_drop_skcipher(spawn);
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
crypto_rfc3686_free(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_ctr_tmpls[] = {
|
||||
|
27	crypto/cts.c
@ -327,7 +327,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
struct skcipher_instance *inst;
|
||||
struct crypto_attr_type *algt;
|
||||
struct skcipher_alg *alg;
|
||||
const char *cipher_name;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
@ -340,10 +339,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(cipher_name))
|
||||
return PTR_ERR(cipher_name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
@ -351,7 +346,7 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
spawn = skcipher_instance_ctx(inst);
|
||||
|
||||
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
|
||||
cipher_name, 0, mask);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
@ -359,15 +354,15 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
err = -EINVAL;
|
||||
if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
if (strncmp(alg->base.cra_name, "cbc(", 4))
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts",
|
||||
&alg->base);
|
||||
if (err)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
@ -391,17 +386,11 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
inst->free = crypto_cts_free;
|
||||
|
||||
err = skcipher_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto err_drop_spawn;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
err_drop_spawn:
|
||||
crypto_drop_skcipher(spawn);
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
crypto_cts_free(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_cts_tmpl = {
|
||||
|
66	crypto/gcm.c
@ -840,7 +840,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
struct aead_instance *inst;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct aead_alg *alg;
|
||||
const char *ccm_name;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
@ -852,19 +851,15 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
ccm_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(ccm_name))
|
||||
return PTR_ERR(ccm_name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
|
||||
spawn = aead_instance_ctx(inst);
|
||||
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
|
||||
ccm_name, 0, mask);
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_spawn_aead_alg(spawn);
|
||||
|
||||
@ -872,11 +867,11 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
|
||||
/* Underlying IV size must be 12. */
|
||||
if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
/* Not a stream cipher? */
|
||||
if (alg->base.cra_blocksize != 1)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
@ -885,7 +880,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4106(%s)", alg->base.cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
@ -909,17 +904,11 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
inst->free = crypto_rfc4106_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_drop_alg;
|
||||
|
||||
out:
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
crypto_rfc4106_free(inst);
|
||||
}
|
||||
return err;
|
||||
|
||||
out_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
|
||||
@ -1071,10 +1060,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct aead_instance *inst;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct aead_alg *alg;
|
||||
struct crypto_rfc4543_instance_ctx *ctx;
|
||||
const char *ccm_name;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
@ -1086,32 +1073,27 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
ccm_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(ccm_name))
|
||||
return PTR_ERR(ccm_name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
|
||||
ctx = aead_instance_ctx(inst);
|
||||
spawn = &ctx->aead;
|
||||
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
|
||||
ccm_name, 0, mask);
|
||||
err = crypto_grab_aead(&ctx->aead, aead_crypto_instance(inst),
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_spawn_aead_alg(spawn);
|
||||
alg = crypto_spawn_aead_alg(&ctx->aead);
|
||||
|
||||
err = -EINVAL;
|
||||
|
||||
/* Underlying IV size must be 12. */
|
||||
if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
/* Not a stream cipher? */
|
||||
if (alg->base.cra_blocksize != 1)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
@ -1120,7 +1102,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
|
||||
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4543(%s)", alg->base.cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
@ -1141,20 +1123,14 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
|
||||
inst->alg.encrypt = crypto_rfc4543_encrypt;
|
||||
inst->alg.decrypt = crypto_rfc4543_decrypt;
|
||||
|
||||
inst->free = crypto_rfc4543_free,
|
||||
inst->free = crypto_rfc4543_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_drop_alg;
|
||||
|
||||
out:
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
crypto_rfc4543_free(inst);
|
||||
}
|
||||
return err;
|
||||
|
||||
out_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_gcm_tmpls[] = {
|
||||
|
@ -41,7 +41,6 @@ static void aead_geniv_free(struct aead_instance *inst)
|
||||
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
struct rtattr **tb, u32 type, u32 mask)
|
||||
{
|
||||
const char *name;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct crypto_attr_type *algt;
|
||||
struct aead_instance *inst;
|
||||
@ -57,10 +56,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(name))
|
||||
return ERR_CAST(name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
@ -71,7 +66,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
mask |= crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
|
||||
name, type, mask);
|
||||
crypto_attr_alg_name(tb[1]), type, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
@ -82,17 +77,17 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
|
||||
err = -EINVAL;
|
||||
if (ivsize < sizeof(u64))
|
||||
goto err_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->base.cra_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
goto err_free_inst;
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->base.cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
@ -111,10 +106,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
out:
|
||||
return inst;
|
||||
|
||||
err_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
aead_geniv_free(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
|
28	crypto/lrw.c
@ -343,15 +343,15 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
err = -EINVAL;
|
||||
if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
if (crypto_skcipher_alg_ivsize(alg))
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
|
||||
&alg->base);
|
||||
if (err)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
err = -EINVAL;
|
||||
cipher_name = alg->base.cra_name;
|
||||
@ -364,20 +364,20 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
|
||||
if (len < 2 || len >= sizeof(ecb_name))
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
if (ecb_name[len - 1] != ')')
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
ecb_name[len - 1] = 0;
|
||||
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
|
||||
err = -ENAMETOOLONG;
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
}
|
||||
} else
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
@ -403,17 +403,11 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
inst->free = free;
|
||||
|
||||
err = skcipher_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto err_drop_spawn;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
err_drop_spawn:
|
||||
crypto_drop_skcipher(spawn);
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
free(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_tmpl = {
|
||||
|
@ -23,9 +23,6 @@
|
||||
#include <linux/types.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#define MD5_DIGEST_WORDS 4
|
||||
#define MD5_MESSAGE_BYTES 64
|
||||
|
||||
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
|
||||
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
|
||||
0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
|
||||
|
@ -232,17 +232,12 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
struct crypto_attr_type *algt;
|
||||
struct aead_instance *inst;
|
||||
struct aead_alg *alg;
|
||||
const char *name;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return PTR_ERR(algt);
|
||||
|
||||
name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(name))
|
||||
return PTR_ERR(name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
@ -252,21 +247,21 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
ctx = aead_instance_ctx(inst);
|
||||
ctx->psenc = padata_alloc_shell(pencrypt);
|
||||
if (!ctx->psenc)
|
||||
goto out_free_inst;
|
||||
goto err_free_inst;
|
||||
|
||||
ctx->psdec = padata_alloc_shell(pdecrypt);
|
||||
if (!ctx->psdec)
|
||||
goto out_free_psenc;
|
||||
goto err_free_inst;
|
||||
|
||||
err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
|
||||
name, 0, 0);
|
||||
crypto_attr_alg_name(tb[1]), 0, 0);
|
||||
if (err)
|
||||
goto out_free_psdec;
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_spawn_aead_alg(&ctx->spawn);
|
||||
err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
|
||||
if (err)
|
||||
goto out_drop_aead;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
|
||||
|
||||
@ -286,21 +281,11 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
inst->free = pcrypt_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_drop_aead;
|
||||
|
||||
out:
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
pcrypt_free(inst);
|
||||
}
|
||||
return err;
|
||||
|
||||
out_drop_aead:
|
||||
crypto_drop_aead(&ctx->spawn);
|
||||
out_free_psdec:
|
||||
padata_free_shell(ctx->psdec);
|
||||
out_free_psenc:
|
||||
padata_free_shell(ctx->psenc);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
@ -60,7 +60,7 @@ static int c_show(struct seq_file *m, void *p)
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
|
||||
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
|
||||
case CRYPTO_ALG_TYPE_CIPHER:
|
||||
seq_printf(m, "type : cipher\n");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
|
@ -37,12 +37,16 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
|
||||
crypto_stats_get(alg);
|
||||
if (!seed && slen) {
|
||||
buf = kmalloc(slen, GFP_KERNEL);
|
||||
if (!buf)
|
||||
if (!buf) {
|
||||
crypto_alg_put(alg);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
err = get_random_bytes_wait(buf, slen);
|
||||
if (err)
|
||||
if (err) {
|
||||
crypto_alg_put(alg);
|
||||
goto out;
|
||||
}
|
||||
seed = buf;
|
||||
}
|
||||
|
||||
|
@ -596,14 +596,11 @@ static void pkcs1pad_free(struct akcipher_instance *inst)
|
||||
|
||||
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
const struct rsa_asn1_template *digest_info;
|
||||
struct crypto_attr_type *algt;
|
||||
u32 mask;
|
||||
struct akcipher_instance *inst;
|
||||
struct pkcs1pad_inst_ctx *ctx;
|
||||
struct crypto_akcipher_spawn *spawn;
|
||||
struct akcipher_alg *rsa_alg;
|
||||
const char *rsa_alg_name;
|
||||
const char *hash_name;
|
||||
int err;
|
||||
|
||||
@ -616,60 +613,49 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
mask = crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
rsa_alg_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(rsa_alg_name))
|
||||
return PTR_ERR(rsa_alg_name);
|
||||
|
||||
hash_name = crypto_attr_alg_name(tb[2]);
|
||||
if (IS_ERR(hash_name))
|
||||
hash_name = NULL;
|
||||
|
||||
if (hash_name) {
|
||||
digest_info = rsa_lookup_asn1(hash_name);
|
||||
if (!digest_info)
|
||||
return -EINVAL;
|
||||
} else
|
||||
digest_info = NULL;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
|
||||
ctx = akcipher_instance_ctx(inst);
|
||||
spawn = &ctx->spawn;
|
||||
ctx->digest_info = digest_info;
|
||||
|
||||
err = crypto_grab_akcipher(spawn, akcipher_crypto_instance(inst),
|
||||
rsa_alg_name, 0, mask);
|
||||
err = crypto_grab_akcipher(&ctx->spawn, akcipher_crypto_instance(inst),
|
||||
crypto_attr_alg_name(tb[1]), 0, mask);
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
goto err_free_inst;
|
||||
|
||||
rsa_alg = crypto_spawn_akcipher_alg(spawn);
|
||||
rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
|
||||
if (!hash_name) {
|
||||
hash_name = crypto_attr_alg_name(tb[2]);
|
||||
if (IS_ERR(hash_name)) {
|
||||
if (snprintf(inst->alg.base.cra_name,
|
||||
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
|
||||
rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
if (snprintf(inst->alg.base.cra_driver_name,
|
||||
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
|
||||
rsa_alg->base.cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
} else {
|
||||
ctx->digest_info = rsa_lookup_asn1(hash_name);
|
||||
if (!ctx->digest_info) {
|
||||
err = -EINVAL;
|
||||
goto err_free_inst;
|
||||
}
|
||||
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
|
||||
hash_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
|
||||
if (snprintf(inst->alg.base.cra_driver_name,
|
||||
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
|
||||
rsa_alg->base.cra_driver_name,
|
||||
hash_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
goto err_free_inst;
|
||||
}
|
||||
|
||||
inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
@ -691,15 +677,10 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
inst->free = pkcs1pad_free;
|
||||
|
||||
err = akcipher_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_drop_alg;
|
||||
|
||||
return 0;
|
||||
|
||||
out_drop_alg:
|
||||
crypto_drop_akcipher(spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
pkcs1pad_free(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1514,8 +1514,8 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
|
||||
return;
|
||||
}
|
||||
|
||||
pr_info("\ntesting speed of async %s (%s) %s\n", algo,
|
||||
get_driver_name(crypto_skcipher, tfm), e);
|
||||
pr_info("\ntesting speed of %s %s (%s) %s\n", async ? "async" : "sync",
|
||||
algo, get_driver_name(crypto_skcipher, tfm), e);
|
||||
|
||||
req = skcipher_request_alloc(tfm, GFP_KERNEL);
|
||||
if (!req) {
|
||||
|
@ -91,10 +91,11 @@ struct aead_test_suite {
|
||||
unsigned int einval_allowed : 1;
|
||||
|
||||
/*
|
||||
* Set if the algorithm intentionally ignores the last 8 bytes of the
|
||||
* AAD buffer during decryption.
|
||||
* Set if this algorithm requires that the IV be located at the end of
|
||||
* the AAD buffer, in addition to being given in the normal way. The
|
||||
* behavior when the two IV copies differ is implementation-defined.
|
||||
*/
|
||||
unsigned int esp_aad : 1;
|
||||
unsigned int aad_iv : 1;
|
||||
};
|
||||
|
||||
struct cipher_test_suite {
|
||||
@ -2167,9 +2168,10 @@ struct aead_extra_tests_ctx {
|
||||
* here means the full ciphertext including the authentication tag. The
|
||||
* authentication tag (and hence also the ciphertext) is assumed to be nonempty.
|
||||
*/
|
||||
static void mutate_aead_message(struct aead_testvec *vec, bool esp_aad)
|
||||
static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
|
||||
unsigned int ivsize)
|
||||
{
|
||||
const unsigned int aad_tail_size = esp_aad ? 8 : 0;
|
||||
const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
|
||||
const unsigned int authsize = vec->clen - vec->plen;
|
||||
|
||||
if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) {
|
||||
@ -2207,6 +2209,9 @@ static void generate_aead_message(struct aead_request *req,
|
||||
|
||||
/* Generate the AAD. */
|
||||
generate_random_bytes((u8 *)vec->assoc, vec->alen);
|
||||
if (suite->aad_iv && vec->alen >= ivsize)
|
||||
/* Avoid implementation-defined behavior. */
|
||||
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
|
||||
|
||||
if (inauthentic && prandom_u32() % 2 == 0) {
|
||||
/* Generate a random ciphertext. */
|
||||
@ -2242,7 +2247,7 @@ static void generate_aead_message(struct aead_request *req,
|
||||
* Mutate the authentic (ciphertext, AAD) pair to get an
|
||||
* inauthentic one.
|
||||
*/
|
||||
mutate_aead_message(vec, suite->esp_aad);
|
||||
mutate_aead_message(vec, suite->aad_iv, ivsize);
|
||||
}
|
||||
vec->novrfy = 1;
|
||||
if (suite->einval_allowed)
|
||||
@ -2507,11 +2512,11 @@ static int test_aead_extra(const char *driver,
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = test_aead_inauthentic_inputs(ctx);
|
||||
err = test_aead_vs_generic_impl(ctx);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = test_aead_vs_generic_impl(ctx);
|
||||
err = test_aead_inauthentic_inputs(ctx);
|
||||
out:
|
||||
kfree(ctx->vec.key);
|
||||
kfree(ctx->vec.iv);
|
||||
@ -5229,7 +5234,7 @@ static const struct alg_test_desc alg_test_descs[] = {
|
||||
.aead = {
|
||||
____VECS(aes_gcm_rfc4106_tv_template),
|
||||
.einval_allowed = 1,
|
||||
.esp_aad = 1,
|
||||
.aad_iv = 1,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
@ -5241,7 +5246,7 @@ static const struct alg_test_desc alg_test_descs[] = {
|
||||
.aead = {
|
||||
____VECS(aes_ccm_rfc4309_tv_template),
|
||||
.einval_allowed = 1,
|
||||
.esp_aad = 1,
|
||||
.aad_iv = 1,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
@ -5252,6 +5257,7 @@ static const struct alg_test_desc alg_test_descs[] = {
|
||||
.aead = {
|
||||
____VECS(aes_gcm_rfc4543_tv_template),
|
||||
.einval_allowed = 1,
|
||||
.aad_iv = 1,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
@ -5267,7 +5273,7 @@ static const struct alg_test_desc alg_test_descs[] = {
|
||||
.aead = {
|
||||
____VECS(rfc7539esp_tv_template),
|
||||
.einval_allowed = 1,
|
||||
.esp_aad = 1,
|
||||
.aad_iv = 1,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
|
28	crypto/xts.c
@ -379,15 +379,15 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
err = -EINVAL;
|
||||
if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
if (crypto_skcipher_alg_ivsize(alg))
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
|
||||
&alg->base);
|
||||
if (err)
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
err = -EINVAL;
|
||||
cipher_name = alg->base.cra_name;
|
||||
@ -400,20 +400,20 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
|
||||
if (len < 2 || len >= sizeof(ctx->name))
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
if (ctx->name[len - 1] != ')')
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
ctx->name[len - 1] = 0;
|
||||
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
|
||||
err = -ENAMETOOLONG;
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
}
|
||||
} else
|
||||
goto err_drop_spawn;
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
@ -437,17 +437,11 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
inst->free = free;
|
||||
|
||||
err = skcipher_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto err_drop_spawn;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
err_drop_spawn:
|
||||
crypto_drop_skcipher(&ctx->spawn);
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
free(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_tmpl = {
|
||||
|
@ -26,6 +26,8 @@
|
||||
*/
|
||||
#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
|
||||
|
||||
static struct fsl_mc_version mc_version;
|
||||
|
||||
/**
|
||||
* struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
|
||||
* @root_mc_bus_dev: fsl-mc device representing the root DPRC
|
||||
@ -54,20 +56,6 @@ struct fsl_mc_addr_translation_range {
|
||||
phys_addr_t start_phys_addr;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct mc_version
|
||||
* @major: Major version number: incremented on API compatibility changes
|
||||
* @minor: Minor version number: incremented on API additions (that are
|
||||
* backward compatible); reset when major version is incremented
|
||||
* @revision: Internal revision number: incremented on implementation changes
|
||||
* and/or bug fixes that have no impact on API
|
||||
*/
|
||||
struct mc_version {
|
||||
u32 major;
|
||||
u32 minor;
|
||||
u32 revision;
|
||||
};
|
||||
|
||||
/**
|
||||
* fsl_mc_bus_match - device to driver matching callback
|
||||
* @dev: the fsl-mc device to match against
|
||||
@ -338,7 +326,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
|
||||
*/
|
||||
static int mc_get_version(struct fsl_mc_io *mc_io,
|
||||
u32 cmd_flags,
|
||||
struct mc_version *mc_ver_info)
|
||||
struct fsl_mc_version *mc_ver_info)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
struct dpmng_rsp_get_version *rsp_params;
|
||||
@ -363,6 +351,20 @@ static int mc_get_version(struct fsl_mc_io *mc_io,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fsl_mc_get_version - function to retrieve the MC f/w version information
|
||||
*
|
||||
* Return: mc version when called after fsl-mc-bus probe; NULL otherwise.
|
||||
*/
|
||||
struct fsl_mc_version *fsl_mc_get_version(void)
|
||||
{
|
||||
if (mc_version.major)
|
||||
return &mc_version;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_mc_get_version);
|
||||
|
||||
/**
|
||||
* fsl_mc_get_root_dprc - function to traverse to the root dprc
|
||||
*/
|
||||
@ -862,7 +864,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
|
||||
int container_id;
|
||||
phys_addr_t mc_portal_phys_addr;
|
||||
u32 mc_portal_size;
|
||||
struct mc_version mc_version;
|
||||
struct resource res;
|
||||
|
||||
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
|
||||
|
@ -244,7 +244,8 @@ config HW_RANDOM_MXC_RNGA
|
||||
|
||||
config HW_RANDOM_IMX_RNGC
|
||||
tristate "Freescale i.MX RNGC Random Number Generator"
|
||||
depends on ARCH_MXC
|
||||
depends on HAS_IOMEM && HAVE_CLK
|
||||
depends on SOC_IMX25 || COMPILE_TEST
|
||||
default HW_RANDOM
|
||||
---help---
|
||||
This driver provides kernel-side support for the Random Number
|
||||
@ -466,6 +467,13 @@ config HW_RANDOM_NPCM
|
||||
|
||||
If unsure, say Y.
|
||||
|
||||
config HW_RANDOM_KEYSTONE
|
||||
depends on ARCH_KEYSTONE || COMPILE_TEST
|
||||
default HW_RANDOM
|
||||
tristate "TI Keystone NETCP SA Hardware random number generator"
|
||||
help
|
||||
This option enables Keystone's hardware random generator.
|
||||
|
||||
endif # HW_RANDOM
|
||||
|
||||
config UML_RANDOM
|
||||
@ -482,10 +490,3 @@ config UML_RANDOM
|
||||
(check your distro, or download from
|
||||
http://sourceforge.net/projects/gkernel/). rngd periodically reads
|
||||
/dev/hwrng and injects the entropy into /dev/random.
|
||||
|
||||
config HW_RANDOM_KEYSTONE
|
||||
depends on ARCH_KEYSTONE || COMPILE_TEST
|
||||
default HW_RANDOM
|
||||
tristate "TI Keystone NETCP SA Hardware random number generator"
|
||||
help
|
||||
This option enables Keystone's hardware random generator.
|
||||
|
@@ -18,12 +18,22 @@
#include <linux/completion.h>
#include <linux/io.h>

#define RNGC_VER_ID 0x0000
#define RNGC_COMMAND 0x0004
#define RNGC_CONTROL 0x0008
#define RNGC_STATUS 0x000C
#define RNGC_ERROR 0x0010
#define RNGC_FIFO 0x0014

/* the fields in the ver id register */
#define RNGC_TYPE_SHIFT 28
#define RNGC_VER_MAJ_SHIFT 8

/* the rng_type field */
#define RNGC_TYPE_RNGB 0x1
#define RNGC_TYPE_RNGC 0x2

#define RNGC_CMD_CLR_ERR 0x00000020
#define RNGC_CMD_CLR_INT 0x00000010
#define RNGC_CMD_SEED 0x00000002
@@ -31,6 +41,7 @@

#define RNGC_CTRL_MASK_ERROR 0x00000040
#define RNGC_CTRL_MASK_DONE 0x00000020
#define RNGC_CTRL_AUTO_SEED 0x00000010

#define RNGC_STATUS_ERROR 0x00010000
#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
@@ -100,15 +111,11 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);

ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
if (!ret) {
imx_rngc_irq_mask_clear(rngc);
imx_rngc_irq_mask_clear(rngc);
if (!ret)
return -ETIMEDOUT;
}

if (rngc->err_reg != 0)
return -EIO;

return 0;
return rngc->err_reg ? -EIO : 0;
}

static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
@@ -165,17 +172,17 @@ static irqreturn_t imx_rngc_irq(int irq, void *priv)
static int imx_rngc_init(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
u32 cmd;
u32 cmd, ctrl;
int ret;

/* clear error */
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND);

imx_rngc_irq_unmask(rngc);

/* create seed, repeat while there is some statistical error */
do {
imx_rngc_irq_unmask(rngc);

/* seed creation */
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
@@ -184,13 +191,42 @@ static int imx_rngc_init(struct hwrng *rng)
RNGC_TIMEOUT);

if (!ret) {
imx_rngc_irq_mask_clear(rngc);
return -ETIMEDOUT;
ret = -ETIMEDOUT;
goto err;
}

} while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);

return rngc->err_reg ? -EIO : 0;
if (rngc->err_reg) {
ret = -EIO;
goto err;
}

/*
* enable automatic seeding, the rngc creates a new seed automatically
* after serving 2^20 random 160-bit words
*/
ctrl = readl(rngc->base + RNGC_CONTROL);
ctrl |= RNGC_CTRL_AUTO_SEED;
writel(ctrl, rngc->base + RNGC_CONTROL);

/*
* if initialisation was successful, we keep the interrupt
* unmasked until imx_rngc_cleanup is called
* we mask the interrupt ourselves if we return an error
*/
return 0;

err:
imx_rngc_irq_mask_clear(rngc);
return ret;
}

static void imx_rngc_cleanup(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);

imx_rngc_irq_mask_clear(rngc);
}

static int imx_rngc_probe(struct platform_device *pdev)
@@ -198,6 +234,8 @@ static int imx_rngc_probe(struct platform_device *pdev)
struct imx_rngc *rngc;
int ret;
int irq;
u32 ver_id;
u8 rng_type;

rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL);
if (!rngc)
@@ -223,6 +261,17 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (ret)
return ret;

ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = ver_id >> RNGC_TYPE_SHIFT;
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
ret = -ENODEV;
goto err;
}

ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
if (ret) {
@@ -235,6 +284,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
rngc->rng.name = pdev->name;
rngc->rng.init = imx_rngc_init;
rngc->rng.read = imx_rngc_read;
rngc->rng.cleanup = imx_rngc_cleanup;

rngc->dev = &pdev->dev;
platform_set_drvdata(pdev, rngc);
@@ -244,18 +294,21 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
dev_err(rngc->dev, "FSL RNGC self test failed.\n");
dev_err(rngc->dev, "self test failed\n");
goto err;
}
}

ret = hwrng_register(&rngc->rng);
if (ret) {
dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", ret);
dev_err(&pdev->dev, "hwrng registration failed\n");
goto err;
}

dev_info(&pdev->dev, "Freescale RNGC registered.\n");
dev_info(&pdev->dev,
"Freescale RNG%c registered (HW revision %d.%02d)\n",
rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
(ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
return 0;

err:
@@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -233,20 +233,6 @@ config CRYPTO_CRC32_S390

It is available with IBM z13 or later.

config CRYPTO_DEV_MARVELL_CESA
tristate "Marvell's Cryptographic Engine driver"
depends on PLAT_ORION || ARCH_MVEBU
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select SRAM
help
This driver allows you to utilize the Cryptographic Engines and
Security Accelerator (CESA) which can be found on MVEBU and ORION
platforms.
This driver supports CPU offload through DMA transfers.

config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_LIB_DES
@@ -606,6 +592,7 @@ config CRYPTO_DEV_MXS_DCP
source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"

config CRYPTO_DEV_CAVIUM_ZIP
tristate "Cavium ZIP driver"
@@ -685,6 +672,29 @@ choice

endchoice

config CRYPTO_DEV_QCE_SW_MAX_LEN
int "Default maximum request size to use software for AES"
depends on CRYPTO_DEV_QCE && CRYPTO_DEV_QCE_SKCIPHER
default 512
help
This sets the default maximum request size to perform AES requests
using software instead of the crypto engine. It can be changed by
setting the aes_sw_max_len parameter.

Small blocks are processed faster in software than hardware.
Considering the 256-bit ciphers, software is 2-3 times faster than
qce at 256-bytes, 30% faster at 512, and about even at 768-bytes.
With 128-bit keys, the break-even point would be around 1024-bytes.

The default is set a little lower, to 512 bytes, to balance the
cost in CPU usage. The minimum recommended setting is 16-bytes
(1 AES block), since AES-GCM will fail if you set it lower.
Setting this to zero will send all requests to the hardware.

Note that 192-bit keys are not supported by the hardware and are
always processed by the software fallback, and all DES requests
are done by the hardware.

config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -731,6 +741,18 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.

config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
depends on ZYNQMP_FIRMWARE || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_ENGINE
select CRYPTO_AEAD
help
Xilinx ZynqMP has AES-GCM engine used for symmetric key
encryption and decryption. This driver interfaces with AES hw
accelerator. Select this if you want to use the ZynqMP module
for AES algorithms.

config CRYPTO_DEV_MEDIATEK
tristate "MediaTek's EIP97 Cryptographic Engine driver"
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
@@ -18,7 +18,7 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
@@ -47,5 +47,6 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
@@ -565,10 +565,8 @@ static int sun8i_ce_probe(struct platform_device *pdev)

/* Get Non Secure IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(ce->dev, "Cannot get CryptoEngine Non-secure IRQ\n");
if (irq < 0)
return irq;
}

ce->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(ce->reset)) {
@@ -214,7 +214,7 @@ struct sun8i_cipher_tfm_ctx {
* this template
* @alg: one of sub struct must be used
* @stat_req: number of request done on this template
* @stat_fb: total of all data len done on this template
* @stat_fb: number of request which has fallbacked
*/
struct sun8i_ce_alg_template {
u32 type;
@@ -186,7 +186,7 @@ struct sun8i_cipher_tfm_ctx {
* this template
* @alg: one of sub struct must be used
* @stat_req: number of request done on this template
* @stat_fb: total of all data len done on this template
* @stat_fb: number of request which has fallbacked
*/
struct sun8i_ss_alg_template {
u32 type;
@@ -176,7 +176,8 @@ static int atmel_i2c_wakeup(struct i2c_client *client)
* device is idle, asleep or during waking up. Don't check for error
* when waking up the device.
*/
i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
i2c_transfer_buffer_flags(client, i2c_priv->wake_token,
i2c_priv->wake_token_sz, I2C_M_IGNORE_NAK);

/*
* Wait to wake the device. Typical execution times for ecdh and genkey
@@ -366,88 +366,88 @@ static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,

ipriv = filp->private_data;
out_offset = 0;
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Number of SPUs.........%u\n",
ipriv->spu.num_spu);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Current sessions.......%u\n",
atomic_read(&ipriv->session_count));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Session count..........%u\n",
atomic_read(&ipriv->stream_count));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Cipher setkey..........%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Cipher Ops.............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
if (op_cnt) {
out_offset += snprintf(buf + out_offset,
out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
spu_alg_name(alg, mode), op_cnt);
}
}
}
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Hash Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
if (op_cnt) {
out_offset += snprintf(buf + out_offset,
out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"HMAC setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"HMAC Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
if (op_cnt) {
out_offset += snprintf(buf + out_offset,
out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"AEAD setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));

out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"AEAD Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
if (op_cnt) {
out_offset += snprintf(buf + out_offset,
out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
aead_alg_name[alg], op_cnt);
}
}
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Bytes of req data......%llu\n",
(u64)atomic64_read(&ipriv->bytes_out));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Bytes of resp data.....%llu\n",
(u64)atomic64_read(&ipriv->bytes_in));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Mailbox full...........%u\n",
atomic_read(&ipriv->mb_no_spc));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Mailbox send failures..%u\n",
atomic_read(&ipriv->mb_send_fail));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Check ICV errors.......%u\n",
atomic_read(&ipriv->bad_icv));
if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
@@ -455,7 +455,7 @@ static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
SPU_OFIFO_CTRL);
fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
out_offset += snprintf(buf + out_offset,
out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
"SPU %d output FIFO high water.....%u\n",
i, fifo_len);
@@ -13,6 +13,7 @@ config CRYPTO_DEV_FSL_CAAM
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
select SOC_BUS
select CRYPTO_DEV_FSL_CAAM_COMMON
imply FSL_MC_BUS
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -33,6 +34,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG

menuconfig CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
select CRYPTO_ENGINE
default y
help
Enables the driver module for Job Rings which are part of
@@ -56,6 +56,7 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include <crypto/engine.h>

/*
* crypto alg
@@ -101,6 +102,7 @@ struct caam_skcipher_alg {
* per-session context
*/
struct caam_ctx {
struct crypto_engine_ctx enginectx;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
@@ -114,6 +116,14 @@ struct caam_ctx {
unsigned int authsize;
};

struct caam_skcipher_req_ctx {
struct skcipher_edesc *edesc;
};

struct caam_aead_req_ctx {
struct aead_edesc *edesc;
};

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@ -858,6 +868,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
|
||||
* @mapped_src_nents: number of segments in input h/w link table
|
||||
* @mapped_dst_nents: number of segments in output h/w link table
|
||||
* @sec4_sg_bytes: length of dma mapped sec4_sg space
|
||||
* @bklog: stored to determine if the request needs backlog
|
||||
* @sec4_sg_dma: bus physical mapped address of h/w link table
|
||||
* @sec4_sg: pointer to h/w link table
|
||||
* @hw_desc: the h/w job descriptor followed by any referenced link tables
|
||||
@ -868,6 +879,7 @@ struct aead_edesc {
|
||||
int mapped_src_nents;
|
||||
int mapped_dst_nents;
|
||||
int sec4_sg_bytes;
|
||||
bool bklog;
|
||||
dma_addr_t sec4_sg_dma;
|
||||
struct sec4_sg_entry *sec4_sg;
|
||||
u32 hw_desc[];
|
||||
@ -881,6 +893,7 @@ struct aead_edesc {
|
||||
* @mapped_dst_nents: number of segments in output h/w link table
|
||||
* @iv_dma: dma address of iv for checking continuity and link table
|
||||
* @sec4_sg_bytes: length of dma mapped sec4_sg space
|
||||
* @bklog: stored to determine if the request needs backlog
|
||||
* @sec4_sg_dma: bus physical mapped address of h/w link table
|
||||
* @sec4_sg: pointer to h/w link table
|
||||
* @hw_desc: the h/w job descriptor followed by any referenced link tables
|
||||
@ -893,9 +906,10 @@ struct skcipher_edesc {
|
||||
int mapped_dst_nents;
|
||||
dma_addr_t iv_dma;
|
||||
int sec4_sg_bytes;
|
||||
bool bklog;
|
||||
dma_addr_t sec4_sg_dma;
|
||||
struct sec4_sg_entry *sec4_sg;
|
||||
u32 hw_desc[0];
|
||||
u32 hw_desc[];
|
||||
};
|
||||
|
||||
static void caam_unmap(struct device *dev, struct scatterlist *src,
|
||||
@ -941,16 +955,18 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
|
||||
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
|
||||
}
|
||||
|
||||
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct aead_request *req = context;
|
||||
struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
|
||||
struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
|
||||
struct aead_edesc *edesc;
|
||||
int ecode = 0;
|
||||
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
|
||||
edesc = rctx->edesc;
|
||||
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(jrdev, err);
|
||||
@ -959,82 +975,30 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
|
||||
kfree(edesc);
|
||||
|
||||
aead_request_complete(req, ecode);
|
||||
}
|
||||
|
||||
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct aead_request *req = context;
|
||||
struct aead_edesc *edesc;
|
||||
int ecode = 0;
|
||||
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
|
||||
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(jrdev, err);
|
||||
|
||||
aead_unmap(jrdev, edesc, req);
|
||||
|
||||
kfree(edesc);
|
||||
|
||||
aead_request_complete(req, ecode);
|
||||
}
|
||||
|
||||
static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct skcipher_request *req = context;
|
||||
struct skcipher_edesc *edesc;
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
int ivsize = crypto_skcipher_ivsize(skcipher);
|
||||
int ecode = 0;
|
||||
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
|
||||
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(jrdev, err);
|
||||
|
||||
skcipher_unmap(jrdev, edesc, req);
|
||||
|
||||
/*
|
||||
* The crypto API expects us to set the IV (req->iv) to the last
|
||||
* ciphertext block (CBC mode) or last counter (CTR mode).
|
||||
* This is used e.g. by the CTS mode.
|
||||
* If no backlog flag, the completion of the request is done
|
||||
* by CAAM, not crypto engine.
|
||||
*/
|
||||
if (ivsize && !ecode) {
|
||||
memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
|
||||
ivsize);
|
||||
print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
|
||||
edesc->src_nents > 1 ? 100 : ivsize, 1);
|
||||
}
|
||||
|
||||
caam_dump_sg("dst @" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
|
||||
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
|
||||
|
||||
kfree(edesc);
|
||||
|
||||
skcipher_request_complete(req, ecode);
|
||||
if (!edesc->bklog)
|
||||
aead_request_complete(req, ecode);
|
||||
else
|
||||
crypto_finalize_aead_request(jrp->engine, req, ecode);
|
||||
}
|
||||
|
||||
static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct skcipher_request *req = context;
|
||||
struct skcipher_edesc *edesc;
|
||||
struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
|
||||
int ivsize = crypto_skcipher_ivsize(skcipher);
|
||||
int ecode = 0;
|
||||
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
|
||||
edesc = rctx->edesc;
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(jrdev, err);
|
||||
|
||||
@ -1060,7 +1024,14 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
|
||||
kfree(edesc);
|
||||
|
||||
skcipher_request_complete(req, ecode);
|
||||
/*
|
||||
* If no backlog flag, the completion of the request is done
|
||||
* by CAAM, not crypto engine.
|
||||
*/
|
||||
if (!edesc->bklog)
|
||||
skcipher_request_complete(req, ecode);
|
||||
else
|
||||
crypto_finalize_skcipher_request(jrp->engine, req, ecode);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1306,6 +1277,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
|
||||
@ -1406,6 +1378,9 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
edesc->mapped_dst_nents = mapped_dst_nents;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
|
||||
desc_bytes;
|
||||
|
||||
rctx->edesc = edesc;
|
||||
|
||||
*all_contig_ptr = !(mapped_src_nents > 1);
|
||||
|
||||
sec4_sg_index = 0;
|
||||
@ -1436,7 +1411,34 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
return edesc;
|
||||
}
|
||||
|
||||
static int gcm_encrypt(struct aead_request *req)
|
||||
static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
|
||||
{
|
||||
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
|
||||
struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
|
||||
struct aead_edesc *edesc = rctx->edesc;
|
||||
u32 *desc = edesc->hw_desc;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Only the backlog request are sent to crypto-engine since the others
|
||||
* can be handled by CAAM, if free, especially since JR has up to 1024
|
||||
* entries (more than the 10 entries from crypto-engine).
|
||||
*/
|
||||
if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
|
||||
ret = crypto_transfer_aead_request_to_engine(jrpriv->engine,
|
||||
req);
|
||||
else
|
||||
ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
|
||||
|
||||
if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
|
||||
aead_unmap(jrdev, edesc, req);
|
||||
kfree(rctx->edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
|
||||
{
|
||||
struct aead_edesc *edesc;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
@ -1444,96 +1446,120 @@ static int gcm_encrypt(struct aead_request *req)
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
bool all_contig;
|
||||
u32 *desc;
|
||||
int ret = 0;
|
||||
|
||||
edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
|
||||
encrypt);
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
desc = edesc->hw_desc;
|
||||
|
||||
init_chachapoly_job(req, edesc, all_contig, encrypt);
|
||||
print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
return aead_enqueue_req(jrdev, req);
|
||||
}
|
||||
|
||||
static int chachapoly_encrypt(struct aead_request *req)
|
||||
{
|
||||
return chachapoly_crypt(req, true);
|
||||
}
|
||||
|
||||
static int chachapoly_decrypt(struct aead_request *req)
|
||||
{
|
||||
return chachapoly_crypt(req, false);
|
||||
}
|
||||
|
||||
static inline int aead_crypt(struct aead_request *req, bool encrypt)
|
||||
{
|
||||
struct aead_edesc *edesc;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
bool all_contig;
|
||||
|
||||
/* allocate extended descriptor */
|
||||
edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
|
||||
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
|
||||
&all_contig, encrypt);
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
/* Create and submit job descriptor */
|
||||
init_gcm_job(req, edesc, all_contig, true);
|
||||
init_authenc_job(req, edesc, all_contig, encrypt);
|
||||
|
||||
print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
|
||||
desc_bytes(edesc->hw_desc), 1);
|
||||
|
||||
desc = edesc->hw_desc;
|
||||
ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
return aead_enqueue_req(jrdev, req);
|
||||
}
|
||||
|
||||
static int aead_encrypt(struct aead_request *req)
|
||||
{
|
||||
return aead_crypt(req, true);
|
||||
}
|
||||
|
||||
static int aead_decrypt(struct aead_request *req)
|
||||
{
|
||||
return aead_crypt(req, false);
|
||||
}
|
||||
|
||||
static int aead_do_one_req(struct crypto_engine *engine, void *areq)
|
||||
{
|
||||
struct aead_request *req = aead_request_cast(areq);
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
|
||||
u32 *desc = rctx->edesc->hw_desc;
|
||||
int ret;
|
||||
|
||||
rctx->edesc->bklog = true;
|
||||
|
||||
ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
|
||||
|
||||
if (ret != -EINPROGRESS) {
|
||||
aead_unmap(ctx->jrdev, rctx->edesc, req);
|
||||
kfree(rctx->edesc);
|
||||
} else {
|
||||
aead_unmap(jrdev, edesc, req);
|
||||
kfree(edesc);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int chachapoly_encrypt(struct aead_request *req)
|
||||
static inline int gcm_crypt(struct aead_request *req, bool encrypt)
|
||||
{
|
||||
struct aead_edesc *edesc;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
bool all_contig;
|
||||
u32 *desc;
|
||||
int ret;
|
||||
|
||||
edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
|
||||
true);
|
||||
/* allocate extended descriptor */
|
||||
edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
|
||||
encrypt);
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
desc = edesc->hw_desc;
|
||||
/* Create and submit job descriptor */
|
||||
init_gcm_job(req, edesc, all_contig, encrypt);
|
||||
|
||||
init_chachapoly_job(req, edesc, all_contig, true);
|
||||
print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
|
||||
desc_bytes(edesc->hw_desc), 1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
aead_unmap(jrdev, edesc, req);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return aead_enqueue_req(jrdev, req);
|
||||
}
|
||||
|
||||
static int chachapoly_decrypt(struct aead_request *req)
|
||||
static int gcm_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct aead_edesc *edesc;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
bool all_contig;
|
||||
u32 *desc;
|
||||
int ret;
|
||||
return gcm_crypt(req, true);
|
||||
}
|
||||
|
||||
edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
|
||||
false);
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
desc = edesc->hw_desc;
|
||||
|
||||
init_chachapoly_job(req, edesc, all_contig, false);
|
||||
print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
aead_unmap(jrdev, edesc, req);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
static int gcm_decrypt(struct aead_request *req)
|
||||
{
|
||||
return gcm_crypt(req, false);
|
||||
}
|
||||
|
||||
static int ipsec_gcm_encrypt(struct aead_request *req)
|
||||
@ -1541,119 +1567,11 @@ static int ipsec_gcm_encrypt(struct aead_request *req)
|
||||
return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
|
||||
}
|
||||
|
||||
static int aead_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct aead_edesc *edesc;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
bool all_contig;
|
||||
u32 *desc;
|
||||
int ret = 0;
|
||||
|
||||
/* allocate extended descriptor */
|
||||
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
|
||||
&all_contig, true);
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
/* Create and submit job descriptor */
|
||||
init_authenc_job(req, edesc, all_contig, true);
|
||||
|
||||
print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
|
||||
desc_bytes(edesc->hw_desc), 1);
|
||||
|
||||
desc = edesc->hw_desc;
|
||||
ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
aead_unmap(jrdev, edesc, req);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int gcm_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct aead_edesc *edesc;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
bool all_contig;
|
||||
u32 *desc;
|
||||
int ret = 0;
|
||||
|
||||
/* allocate extended descriptor */
|
||||
edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
/* Create and submit job descriptor*/
|
||||
init_gcm_job(req, edesc, all_contig, false);
|
||||
|
||||
print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
|
||||
desc_bytes(edesc->hw_desc), 1);
|
||||
|
||||
desc = edesc->hw_desc;
|
||||
ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
aead_unmap(jrdev, edesc, req);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ipsec_gcm_decrypt(struct aead_request *req)
|
||||
{
|
||||
return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
|
||||
}
|
||||
|
||||
static int aead_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct aead_edesc *edesc;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
bool all_contig;
|
||||
u32 *desc;
|
||||
int ret = 0;
|
||||
|
||||
caam_dump_sg("dec src@" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
|
||||
req->assoclen + req->cryptlen, 1);
|
||||
|
||||
/* allocate extended descriptor */
|
||||
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
|
||||
&all_contig, false);
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
/* Create and submit job descriptor*/
|
||||
init_authenc_job(req, edesc, all_contig, false);
|
||||
|
||||
print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
|
||||
desc_bytes(edesc->hw_desc), 1);
|
||||
|
||||
desc = edesc->hw_desc;
|
||||
ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
aead_unmap(jrdev, edesc, req);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* allocate and map the skcipher extended descriptor for skcipher
|
||||
*/
|
||||
@ -1662,6 +1580,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
||||
{
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
|
||||
struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
@ -1760,6 +1679,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
|
||||
desc_bytes);
|
||||
rctx->edesc = edesc;
|
||||
|
||||
/* Make sure IV is located in a DMAable area */
|
||||
if (ivsize) {
|
||||
@ -1815,12 +1735,35 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
||||
return edesc;
|
||||
}
|
||||
|
||||
static int skcipher_encrypt(struct skcipher_request *req)
|
||||
static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
|
||||
{
|
||||
struct skcipher_request *req = skcipher_request_cast(areq);
|
||||
struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
|
||||
struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
|
||||
u32 *desc = rctx->edesc->hw_desc;
|
||||
int ret;
|
||||
|
||||
rctx->edesc->bklog = true;
|
||||
|
||||
ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
|
||||
|
||||
if (ret != -EINPROGRESS) {
|
||||
skcipher_unmap(ctx->jrdev, rctx->edesc, req);
|
||||
kfree(rctx->edesc);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
|
||||
{
|
||||
struct skcipher_edesc *edesc;
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
|
||||
u32 *desc;
|
||||
int ret = 0;
|
||||
|
||||
@ -1833,18 +1776,25 @@ static int skcipher_encrypt(struct skcipher_request *req)
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
/* Create and submit job descriptor*/
|
||||
init_skcipher_job(req, edesc, true);
|
||||
init_skcipher_job(req, edesc, encrypt);
|
||||
|
||||
print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
|
||||
desc_bytes(edesc->hw_desc), 1);
|
||||
|
||||
desc = edesc->hw_desc;
|
||||
ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
|
||||
/*
|
||||
* Only the backlog request are sent to crypto-engine since the others
|
||||
* can be handled by CAAM, if free, especially since JR has up to 1024
|
||||
* entries (more than the 10 entries from crypto-engine).
|
||||
*/
|
||||
if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
|
||||
ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
|
||||
req);
|
||||
else
|
||||
ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
|
||||
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
|
||||
skcipher_unmap(jrdev, edesc, req);
|
||||
kfree(edesc);
|
||||
}
|
||||
@ -1852,40 +1802,14 @@ static int skcipher_encrypt(struct skcipher_request *req)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int skcipher_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return skcipher_crypt(req, true);
|
||||
}
|
||||
|
||||
static int skcipher_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct skcipher_edesc *edesc;
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
u32 *desc;
|
||||
int ret = 0;
|
||||
|
||||
if (!req->cryptlen)
|
||||
return 0;
|
||||
|
||||
/* allocate extended descriptor */
|
||||
edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
/* Create and submit job descriptor*/
|
||||
init_skcipher_job(req, edesc, false);
|
||||
desc = edesc->hw_desc;
|
||||
|
||||
print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
|
||||
desc_bytes(edesc->hw_desc), 1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
skcipher_unmap(jrdev, edesc, req);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return skcipher_crypt(req, false);
|
||||
}
|
||||
|
||||
static struct caam_skcipher_alg driver_algs[] = {
|
||||
@ -3391,6 +3315,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
|
||||
{
|
||||
dma_addr_t dma_addr;
|
||||
struct caam_drv_private *priv;
|
||||
const size_t sh_desc_enc_offset = offsetof(struct caam_ctx,
|
||||
sh_desc_enc);
|
||||
|
||||
ctx->jrdev = caam_jr_alloc();
|
||||
if (IS_ERR(ctx->jrdev)) {
|
||||
@ -3406,7 +3332,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
|
||||
|
||||
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
|
||||
offsetof(struct caam_ctx,
|
||||
sh_desc_enc_dma),
|
||||
sh_desc_enc_dma) -
|
||||
sh_desc_enc_offset,
|
||||
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
|
||||
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
|
||||
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
|
||||
@ -3416,8 +3343,10 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
|
||||
|
||||
ctx->sh_desc_enc_dma = dma_addr;
|
||||
ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
|
||||
sh_desc_dec);
|
||||
ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
|
||||
sh_desc_dec) -
|
||||
sh_desc_enc_offset;
|
||||
ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) -
|
||||
sh_desc_enc_offset;
|
||||
|
||||
/* copy descriptor header template value */
|
||||
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
|
||||
@ -3431,6 +3360,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
|
||||
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
|
||||
struct caam_skcipher_alg *caam_alg =
|
||||
container_of(alg, typeof(*caam_alg), skcipher);
|
||||
struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
|
||||
|
||||
ctx->enginectx.op.do_one_request = skcipher_do_one_req;
|
||||
|
||||
return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
|
||||
false);
|
||||
@ -3443,13 +3377,18 @@ static int caam_aead_init(struct crypto_aead *tfm)
|
||||
container_of(alg, struct caam_aead_alg, aead);
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
|
||||
|
||||
ctx->enginectx.op.do_one_request = aead_do_one_req;
|
||||
|
||||
return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
|
||||
}
|
||||
|
||||
static void caam_exit_common(struct caam_ctx *ctx)
|
||||
{
|
||||
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
|
||||
offsetof(struct caam_ctx, sh_desc_enc_dma),
|
||||
offsetof(struct caam_ctx, sh_desc_enc_dma) -
|
||||
offsetof(struct caam_ctx, sh_desc_enc),
|
||||
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
|
||||
caam_jr_free(ctx->jrdev);
|
||||
}
|
||||
|
@@ -1379,6 +1379,9 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
u32 options = cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT;
bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
OP_ALG_ALGSEL_CHACHA20);

init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
@@ -1417,14 +1420,15 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
LDST_OFFSET_SHIFT));

/* Load operation */
append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
OP_ALG_ENCRYPT);
if (is_chacha20)
options |= OP_ALG_AS_FINALIZE;
append_operation(desc, options);

/* Perform operation */
skcipher_append_src_dst(desc);

/* Store IV */
if (ivsize)
if (!is_chacha20 && ivsize)
append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_1_CCB | (ctx1_iv_off <<
LDST_OFFSET_SHIFT));
@@ -1451,6 +1455,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
OP_ALG_ALGSEL_CHACHA20);

init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
@@ -1499,7 +1505,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
skcipher_append_src_dst(desc);

/* Store IV */
if (ivsize)
if (!is_chacha20 && ivsize)
append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_1_CCB | (ctx1_iv_off <<
LDST_OFFSET_SHIFT));
@@ -1518,7 +1524,13 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
*/
void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
/*
* Set sector size to a big value, practically disabling
* sector size segmentation in xts implementation. We cannot
* take full advantage of this HW feature with existing
* crypto API / dm-crypt SW architecture.
*/
__be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;

init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
@@ -1571,7 +1583,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
*/
void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
/*
* Set sector size to a big value, practically disabling
* sector size segmentation in xts implementation. We cannot
* take full advantage of this HW feature with existing
* crypto API / dm-crypt SW architecture.
*/
__be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;

init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
@@ -783,7 +783,7 @@ struct aead_edesc {
unsigned int assoclen;
dma_addr_t assoclen_dma;
struct caam_drv_req drv_req;
struct qm_sg_entry sgt[0];
struct qm_sg_entry sgt[];
};

/*
@@ -803,7 +803,7 @@ struct skcipher_edesc {
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct caam_drv_req drv_req;
struct qm_sg_entry sgt[0];
struct qm_sg_entry sgt[];
};

static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
@@ -114,7 +114,7 @@ struct aead_edesc {
dma_addr_t qm_sg_dma;
unsigned int assoclen;
dma_addr_t assoclen_dma;
struct dpaa2_sg_entry sgt[0];
struct dpaa2_sg_entry sgt[];
};

/*
@@ -132,7 +132,7 @@ struct skcipher_edesc {
dma_addr_t iv_dma;
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct dpaa2_sg_entry sgt[0];
struct dpaa2_sg_entry sgt[];
};

/*
@@ -146,7 +146,7 @@ struct ahash_edesc {
dma_addr_t qm_sg_dma;
int src_nents;
int qm_sg_bytes;
struct dpaa2_sg_entry sgt[0];
struct dpaa2_sg_entry sgt[];
};

/**
@@ -65,6 +65,7 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>

#define CAAM_CRA_PRIORITY 3000

@@ -86,6 +87,7 @@ static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
struct crypto_engine_ctx enginectx;
u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
@@ -111,9 +113,12 @@ struct caam_hash_state {
int buflen;
int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*update)(struct ahash_request *req) ____cacheline_aligned;
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
struct ahash_edesc *edesc;
void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
void *context);
};

struct caam_export_state {
@@ -395,7 +400,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
init_completion(&result.completion);

ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
if (ret == -EINPROGRESS) {
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
@@ -521,6 +526,7 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
* @sec4_sg_dma: physical mapped address of h/w link table
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @bklog: stored to determine if the request needs backlog
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* @sec4_sg: h/w link table
*/
@@ -528,8 +534,9 @@ struct ahash_edesc {
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
bool bklog;
u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
struct sec4_sg_entry sec4_sg[0];
struct sec4_sg_entry sec4_sg[];
};
static inline void ahash_unmap(struct device *dev,
|
||||
@ -565,10 +572,11 @@ static inline void ahash_unmap_ctx(struct device *dev,
|
||||
ahash_unmap(dev, edesc, req, dst_len);
|
||||
}
|
||||
|
||||
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context, enum dma_data_direction dir)
|
||||
{
|
||||
struct ahash_request *req = context;
|
||||
struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
|
||||
struct ahash_edesc *edesc;
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
@ -578,11 +586,12 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
|
||||
edesc = state->edesc;
|
||||
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(jrdev, err);
|
||||
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
|
||||
memcpy(req->result, state->caam_ctx, digestsize);
|
||||
kfree(edesc);
|
||||
|
||||
@ -590,81 +599,33 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
|
||||
req->base.complete(&req->base, ecode);
|
||||
/*
|
||||
* If no backlog flag, the completion of the request is done
|
||||
* by CAAM, not crypto engine.
|
||||
*/
|
||||
if (!edesc->bklog)
|
||||
req->base.complete(&req->base, ecode);
|
||||
else
|
||||
crypto_finalize_hash_request(jrp->engine, req, ecode);
|
||||
}
|
||||
|
||||
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct ahash_request *req = context;
|
||||
struct ahash_edesc *edesc;
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
int ecode = 0;
|
||||
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(jrdev, err);
|
||||
|
||||
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
|
||||
kfree(edesc);
|
||||
|
||||
scatterwalk_map_and_copy(state->buf, req->src,
|
||||
req->nbytes - state->next_buflen,
|
||||
state->next_buflen, 0);
|
||||
state->buflen = state->next_buflen;
|
||||
|
||||
print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
|
||||
state->buflen, 1);
|
||||
|
||||
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
if (req->result)
|
||||
print_hex_dump_debug("result@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
|
||||
digestsize, 1);
|
||||
|
||||
req->base.complete(&req->base, ecode);
|
||||
ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct ahash_request *req = context;
|
||||
struct ahash_edesc *edesc;
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
int ecode = 0;
|
||||
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(jrdev, err);
|
||||
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
||||
memcpy(req->result, state->caam_ctx, digestsize);
|
||||
kfree(edesc);
|
||||
|
||||
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
|
||||
req->base.complete(&req->base, ecode);
|
||||
ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
|
||||
}
|
||||
|
||||
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context, enum dma_data_direction dir)
|
||||
{
|
||||
struct ahash_request *req = context;
|
||||
struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@ -674,11 +635,11 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,

dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
edesc = state->edesc;
if (err)
ecode = caam_jr_strstatus(jrdev, err);

ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
kfree(edesc);

scatterwalk_map_and_copy(state->buf, req->src,
@ -698,18 +659,42 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);

req->base.complete(&req->base, ecode);
/*
* If no backlog flag, the completion of the request is done
* by CAAM, not crypto engine.
*/
if (!edesc->bklog)
req->base.complete(&req->base, ecode);
else
crypto_finalize_hash_request(jrp->engine, req, ecode);

}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
* Allocate an enhanced descriptor, which contains the hardware descriptor
* and space for hardware scatter table containing sg_num entries.
*/
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
int sg_num, u32 *sh_desc,
dma_addr_t sh_desc_dma,
gfp_t flags)
dma_addr_t sh_desc_dma)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

@ -719,6 +704,8 @@ static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
return NULL;
}

state->edesc = edesc;

init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
HDR_SHARE_DEFER | HDR_REVERSE);

@ -761,6 +748,62 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
return 0;
}

static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc = state->edesc->hw_desc;
int ret;

state->edesc->bklog = true;

ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

if (ret != -EINPROGRESS) {
ahash_unmap(jrdev, state->edesc, req, 0);
kfree(state->edesc);
} else {
ret = 0;
}

return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
void (*cbk)(struct device *jrdev, u32 *desc,
u32 err, void *context),
struct ahash_request *req,
int dst_len, enum dma_data_direction dir)
{
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
struct caam_hash_state *state = ahash_request_ctx(req);
struct ahash_edesc *edesc = state->edesc;
u32 *desc = edesc->hw_desc;
int ret;

state->ahash_op_done = cbk;

/*
* Only the backlog request are sent to crypto-engine since the others
* can be handled by CAAM, if free, especially since JR has up to 1024
* entries (more than the 10 entries from crypto-engine).
*/
if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
req);
else
ret = caam_jr_enqueue(jrdev, desc, cbk, req);

if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
kfree(edesc);
}

return ret;
}
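
The same submit/complete split recurs for the akcipher code further down; reduced to its bare shape, the crypto_engine pattern these hunks implement looks roughly like the following sketch. It is not taken from the patch, and every my_* identifier is hypothetical.

/*
 * Illustrative sketch only: backlog-aware crypto_engine plumbing.
 * All my_* names are hypothetical.
 */
#include <crypto/engine.h>
#include <crypto/internal/hash.h>

struct my_rctx {
	bool bklog;	/* set when the request went through the engine queue */
};

/* Engine callback for queued (backlogged) requests. */
static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct my_rctx *rctx = ahash_request_ctx(req);

	rctx->bklog = true;
	/* push the already-built hardware descriptor to the job ring here */
	return 0;
}

/* Hardware completion: only backlogged requests are finalized via the engine. */
static void my_complete(struct crypto_engine *engine, struct ahash_request *req,
			int err)
{
	struct my_rctx *rctx = ahash_request_ctx(req);

	if (rctx->bklog)
		crypto_finalize_hash_request(engine, req, err);
	else
		req->base.complete(&req->base, err);
}

/* Submission: MAY_BACKLOG requests go through the engine, the rest go direct. */
static int my_enqueue(struct crypto_engine *engine, struct ahash_request *req)
{
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		return crypto_transfer_hash_request_to_engine(engine, req);
	/* otherwise submit straight to the hardware queue */
	return -EINPROGRESS;
}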
|
||||
|
||||
/* submit update job descriptor */
|
||||
static int ahash_update_ctx(struct ahash_request *req)
|
||||
{
|
||||
@ -768,8 +811,6 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
u8 *buf = state->buf;
|
||||
int *buflen = &state->buflen;
|
||||
int *next_buflen = &state->next_buflen;
|
||||
@ -823,8 +864,8 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
* allocate space for base edesc and hw desc commands,
|
||||
* link tables
|
||||
*/
|
||||
edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
|
||||
ctx->sh_desc_update_dma, flags);
|
||||
edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
|
||||
ctx->sh_desc_update_dma);
|
||||
if (!edesc) {
|
||||
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
|
||||
return -ENOMEM;
|
||||
@ -870,11 +911,8 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
ret = -EINPROGRESS;
|
||||
ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
|
||||
ctx->ctx_len, DMA_BIDIRECTIONAL);
|
||||
} else if (*next_buflen) {
|
||||
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
|
||||
req->nbytes, 0);
|
||||
@ -898,8 +936,6 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
int buflen = state->buflen;
|
||||
u32 *desc;
|
||||
int sec4_sg_bytes;
|
||||
@ -911,8 +947,8 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
sizeof(struct sec4_sg_entry);
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
|
||||
ctx->sh_desc_fin_dma, flags);
|
||||
edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
|
||||
ctx->sh_desc_fin_dma);
|
||||
if (!edesc)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -947,11 +983,8 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
return -EINPROGRESS;
|
||||
return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
|
||||
digestsize, DMA_BIDIRECTIONAL);
|
||||
unmap_ctx:
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
||||
kfree(edesc);
|
||||
@ -964,8 +997,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
int buflen = state->buflen;
|
||||
u32 *desc;
|
||||
int sec4_sg_src_index;
|
||||
@ -994,9 +1025,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
|
||||
ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
|
||||
flags);
|
||||
edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
|
||||
ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
|
||||
if (!edesc) {
|
||||
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
|
||||
return -ENOMEM;
|
||||
@ -1027,11 +1057,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
return -EINPROGRESS;
|
||||
return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
|
||||
digestsize, DMA_BIDIRECTIONAL);
|
||||
unmap_ctx:
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
||||
kfree(edesc);
|
||||
@ -1044,8 +1071,6 @@ static int ahash_digest(struct ahash_request *req)
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
u32 *desc;
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
int src_nents, mapped_nents;
|
||||
@ -1072,9 +1097,8 @@ static int ahash_digest(struct ahash_request *req)
|
||||
}
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
|
||||
ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
|
||||
flags);
|
||||
edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
|
||||
ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
|
||||
if (!edesc) {
|
||||
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
|
||||
return -ENOMEM;
|
||||
@ -1103,15 +1127,8 @@ static int ahash_digest(struct ahash_request *req)
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
|
||||
DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
/* submit ahash final if it the first job descriptor */
|
||||
@ -1121,8 +1138,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
u8 *buf = state->buf;
|
||||
int buflen = state->buflen;
|
||||
u32 *desc;
|
||||
@ -1131,8 +1146,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
||||
int ret;
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
|
||||
ctx->sh_desc_digest_dma, flags);
|
||||
edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
|
||||
ctx->sh_desc_digest_dma);
|
||||
if (!edesc)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -1157,20 +1172,12 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return ahash_enqueue_req(jrdev, ahash_done, req,
|
||||
digestsize, DMA_FROM_DEVICE);
|
||||
unmap:
|
||||
ahash_unmap(jrdev, edesc, req, digestsize);
|
||||
kfree(edesc);
|
||||
return -ENOMEM;
|
||||
|
||||
}
|
||||
|
||||
/* submit ahash update if it the first job descriptor after update */
|
||||
@ -1180,8 +1187,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
u8 *buf = state->buf;
|
||||
int *buflen = &state->buflen;
|
||||
int *next_buflen = &state->next_buflen;
|
||||
@ -1234,10 +1239,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
* allocate space for base edesc and hw desc commands,
|
||||
* link tables
|
||||
*/
|
||||
edesc = ahash_edesc_alloc(ctx, pad_nents,
|
||||
edesc = ahash_edesc_alloc(req, pad_nents,
|
||||
ctx->sh_desc_update_first,
|
||||
ctx->sh_desc_update_first_dma,
|
||||
flags);
|
||||
ctx->sh_desc_update_first_dma);
|
||||
if (!edesc) {
|
||||
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
|
||||
return -ENOMEM;
|
||||
@ -1273,11 +1277,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
ret = -EINPROGRESS;
|
||||
ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
|
||||
ctx->ctx_len, DMA_TO_DEVICE);
|
||||
if ((ret != -EINPROGRESS) && (ret != -EBUSY))
|
||||
return ret;
|
||||
state->update = ahash_update_ctx;
|
||||
state->finup = ahash_finup_ctx;
|
||||
state->final = ahash_final_ctx;
|
||||
@ -1305,8 +1308,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
int buflen = state->buflen;
|
||||
u32 *desc;
|
||||
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
|
||||
@ -1336,9 +1337,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
||||
sizeof(struct sec4_sg_entry);
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
|
||||
ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
|
||||
flags);
|
||||
edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
|
||||
ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
|
||||
if (!edesc) {
|
||||
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
|
||||
return -ENOMEM;
|
||||
@ -1368,15 +1368,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return ahash_enqueue_req(jrdev, ahash_done, req,
|
||||
digestsize, DMA_FROM_DEVICE);
|
||||
unmap:
|
||||
ahash_unmap(jrdev, edesc, req, digestsize);
|
||||
kfree(edesc);
|
||||
@ -1391,8 +1384,6 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
u8 *buf = state->buf;
|
||||
int *buflen = &state->buflen;
|
||||
int *next_buflen = &state->next_buflen;
|
||||
@ -1440,11 +1431,10 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
* allocate space for base edesc and hw desc commands,
|
||||
* link tables
|
||||
*/
|
||||
edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
|
||||
edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
|
||||
mapped_nents : 0,
|
||||
ctx->sh_desc_update_first,
|
||||
ctx->sh_desc_update_first_dma,
|
||||
flags);
|
||||
ctx->sh_desc_update_first_dma);
|
||||
if (!edesc) {
|
||||
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
|
||||
return -ENOMEM;
|
||||
@ -1467,11 +1457,10 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
ret = -EINPROGRESS;
|
||||
ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
|
||||
ctx->ctx_len, DMA_TO_DEVICE);
|
||||
if ((ret != -EINPROGRESS) && (ret != -EBUSY))
|
||||
return ret;
|
||||
state->update = ahash_update_ctx;
|
||||
state->finup = ahash_finup_ctx;
|
||||
state->final = ahash_final_ctx;
|
||||
@ -1774,6 +1763,8 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
|
||||
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
|
||||
HASH_MSG_LEN + 64,
|
||||
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
|
||||
const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
|
||||
sh_desc_update);
|
||||
dma_addr_t dma_addr;
|
||||
struct caam_drv_private *priv;
|
||||
|
||||
@ -1826,7 +1817,8 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
|
||||
}
|
||||
|
||||
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
|
||||
offsetof(struct caam_hash_ctx, key),
|
||||
offsetof(struct caam_hash_ctx, key) -
|
||||
sh_desc_update_offset,
|
||||
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
|
||||
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
|
||||
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
|
||||
@ -1844,11 +1836,16 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
|
||||
ctx->sh_desc_update_dma = dma_addr;
|
||||
ctx->sh_desc_update_first_dma = dma_addr +
|
||||
offsetof(struct caam_hash_ctx,
|
||||
sh_desc_update_first);
|
||||
sh_desc_update_first) -
|
||||
sh_desc_update_offset;
|
||||
ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
|
||||
sh_desc_fin);
|
||||
sh_desc_fin) -
|
||||
sh_desc_update_offset;
|
||||
ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
|
||||
sh_desc_digest);
|
||||
sh_desc_digest) -
|
||||
sh_desc_update_offset;
|
||||
|
||||
ctx->enginectx.op.do_one_request = ahash_do_one_req;
|
||||
|
||||
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
|
||||
sizeof(struct caam_hash_state));
|
||||
@ -1865,7 +1862,8 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
|
||||
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
|
||||
offsetof(struct caam_hash_ctx, key),
|
||||
offsetof(struct caam_hash_ctx, key) -
|
||||
offsetof(struct caam_hash_ctx, sh_desc_update),
|
||||
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
|
||||
if (ctx->key_dir != DMA_NONE)
|
||||
dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
|
||||
|
@ -117,76 +117,69 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
|
||||
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
|
||||
{
|
||||
struct akcipher_request *req = context;
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
|
||||
struct rsa_edesc *edesc;
|
||||
int ecode = 0;
|
||||
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(dev, err);
|
||||
|
||||
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
|
||||
edesc = req_ctx->edesc;
|
||||
|
||||
rsa_pub_unmap(dev, edesc, req);
|
||||
rsa_io_unmap(dev, edesc, req);
|
||||
kfree(edesc);
|
||||
|
||||
akcipher_request_complete(req, ecode);
|
||||
/*
|
||||
* If no backlog flag, the completion of the request is done
|
||||
* by CAAM, not crypto engine.
|
||||
*/
|
||||
if (!edesc->bklog)
|
||||
akcipher_request_complete(req, ecode);
|
||||
else
|
||||
crypto_finalize_akcipher_request(jrp->engine, req, ecode);
|
||||
}
|
||||
|
||||
static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct akcipher_request *req = context;
|
||||
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
|
||||
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
|
||||
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
|
||||
struct caam_rsa_key *key = &ctx->key;
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
struct rsa_edesc *edesc;
|
||||
int ecode = 0;
|
||||
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(dev, err);
|
||||
|
||||
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
|
||||
edesc = req_ctx->edesc;
|
||||
|
||||
switch (key->priv_form) {
|
||||
case FORM1:
|
||||
rsa_priv_f1_unmap(dev, edesc, req);
|
||||
break;
|
||||
case FORM2:
|
||||
rsa_priv_f2_unmap(dev, edesc, req);
|
||||
break;
|
||||
case FORM3:
|
||||
rsa_priv_f3_unmap(dev, edesc, req);
|
||||
}
|
||||
|
||||
rsa_priv_f1_unmap(dev, edesc, req);
|
||||
rsa_io_unmap(dev, edesc, req);
|
||||
kfree(edesc);
|
||||
|
||||
akcipher_request_complete(req, ecode);
|
||||
}
|
||||
|
||||
static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct akcipher_request *req = context;
|
||||
struct rsa_edesc *edesc;
|
||||
int ecode = 0;
|
||||
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(dev, err);
|
||||
|
||||
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
|
||||
|
||||
rsa_priv_f2_unmap(dev, edesc, req);
|
||||
rsa_io_unmap(dev, edesc, req);
|
||||
kfree(edesc);
|
||||
|
||||
akcipher_request_complete(req, ecode);
|
||||
}
|
||||
|
||||
static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct akcipher_request *req = context;
|
||||
struct rsa_edesc *edesc;
|
||||
int ecode = 0;
|
||||
|
||||
if (err)
|
||||
ecode = caam_jr_strstatus(dev, err);
|
||||
|
||||
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
|
||||
|
||||
rsa_priv_f3_unmap(dev, edesc, req);
|
||||
rsa_io_unmap(dev, edesc, req);
|
||||
kfree(edesc);
|
||||
|
||||
akcipher_request_complete(req, ecode);
|
||||
/*
|
||||
* If no backlog flag, the completion of the request is done
|
||||
* by CAAM, not crypto engine.
|
||||
*/
|
||||
if (!edesc->bklog)
|
||||
akcipher_request_complete(req, ecode);
|
||||
else
|
||||
crypto_finalize_akcipher_request(jrp->engine, req, ecode);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -334,6 +327,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
|
||||
edesc->src_nents = src_nents;
|
||||
edesc->dst_nents = dst_nents;
|
||||
|
||||
req_ctx->edesc = edesc;
|
||||
|
||||
if (!sec4_sg_bytes)
|
||||
return edesc;
|
||||
|
||||
@ -364,6 +359,33 @@ src_fail:
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
|
||||
{
|
||||
struct akcipher_request *req = container_of(areq,
|
||||
struct akcipher_request,
|
||||
base);
|
||||
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
|
||||
struct device *jrdev = ctx->dev;
|
||||
u32 *desc = req_ctx->edesc->hw_desc;
|
||||
int ret;
|
||||
|
||||
req_ctx->edesc->bklog = true;
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
|
||||
|
||||
if (ret != -EINPROGRESS) {
|
||||
rsa_pub_unmap(jrdev, req_ctx->edesc, req);
|
||||
rsa_io_unmap(jrdev, req_ctx->edesc, req);
|
||||
kfree(req_ctx->edesc);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int set_rsa_pub_pdb(struct akcipher_request *req,
|
||||
struct rsa_edesc *edesc)
|
||||
{
|
||||
@ -627,6 +649,53 @@ unmap_p:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int akcipher_enqueue_req(struct device *jrdev,
|
||||
void (*cbk)(struct device *jrdev, u32 *desc,
|
||||
u32 err, void *context),
|
||||
struct akcipher_request *req)
|
||||
{
|
||||
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
|
||||
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
|
||||
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
|
||||
struct caam_rsa_key *key = &ctx->key;
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
struct rsa_edesc *edesc = req_ctx->edesc;
|
||||
u32 *desc = edesc->hw_desc;
|
||||
int ret;
|
||||
|
||||
req_ctx->akcipher_op_done = cbk;
|
||||
/*
|
||||
* Only the backlog request are sent to crypto-engine since the others
|
||||
* can be handled by CAAM, if free, especially since JR has up to 1024
|
||||
* entries (more than the 10 entries from crypto-engine).
|
||||
*/
|
||||
if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
|
||||
ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
|
||||
req);
|
||||
else
|
||||
ret = caam_jr_enqueue(jrdev, desc, cbk, req);
|
||||
|
||||
if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
|
||||
switch (key->priv_form) {
|
||||
case FORM1:
|
||||
rsa_priv_f1_unmap(jrdev, edesc, req);
|
||||
break;
|
||||
case FORM2:
|
||||
rsa_priv_f2_unmap(jrdev, edesc, req);
|
||||
break;
|
||||
case FORM3:
|
||||
rsa_priv_f3_unmap(jrdev, edesc, req);
|
||||
break;
|
||||
default:
|
||||
rsa_pub_unmap(jrdev, edesc, req);
|
||||
}
|
||||
rsa_io_unmap(jrdev, edesc, req);
|
||||
kfree(edesc);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int caam_rsa_enc(struct akcipher_request *req)
|
||||
{
|
||||
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
|
||||
@ -658,11 +727,7 @@ static int caam_rsa_enc(struct akcipher_request *req)
|
||||
/* Initialize Job Descriptor */
|
||||
init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
|
||||
if (!ret)
|
||||
return -EINPROGRESS;
|
||||
|
||||
rsa_pub_unmap(jrdev, edesc, req);
|
||||
return akcipher_enqueue_req(jrdev, rsa_pub_done, req);
|
||||
|
||||
init_fail:
|
||||
rsa_io_unmap(jrdev, edesc, req);
|
||||
@ -691,11 +756,7 @@ static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
|
||||
/* Initialize Job Descriptor */
|
||||
init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
|
||||
if (!ret)
|
||||
return -EINPROGRESS;
|
||||
|
||||
rsa_priv_f1_unmap(jrdev, edesc, req);
|
||||
return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
|
||||
|
||||
init_fail:
|
||||
rsa_io_unmap(jrdev, edesc, req);
|
||||
@ -724,11 +785,7 @@ static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
|
||||
/* Initialize Job Descriptor */
|
||||
init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
|
||||
if (!ret)
|
||||
return -EINPROGRESS;
|
||||
|
||||
rsa_priv_f2_unmap(jrdev, edesc, req);
|
||||
return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
|
||||
|
||||
init_fail:
|
||||
rsa_io_unmap(jrdev, edesc, req);
|
||||
@ -757,11 +814,7 @@ static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
|
||||
/* Initialize Job Descriptor */
|
||||
init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
|
||||
if (!ret)
|
||||
return -EINPROGRESS;
|
||||
|
||||
rsa_priv_f3_unmap(jrdev, edesc, req);
|
||||
return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
|
||||
|
||||
init_fail:
|
||||
rsa_io_unmap(jrdev, edesc, req);
|
||||
@ -1054,6 +1107,8 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ctx->enginectx.op.do_one_request = akcipher_do_one_req;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
#define _PKC_DESC_H_
|
||||
#include "compat.h"
|
||||
#include "pdb.h"
|
||||
#include <crypto/engine.h>
|
||||
|
||||
/**
|
||||
* caam_priv_key_form - CAAM RSA private key representation
|
||||
@ -87,11 +88,13 @@ struct caam_rsa_key {
|
||||
|
||||
/**
|
||||
* caam_rsa_ctx - per session context.
|
||||
* @enginectx : crypto engine context
|
||||
* @key : RSA key in DMA zone
|
||||
* @dev : device structure
|
||||
* @padding_dma : dma address of padding, for adding it to the input
|
||||
*/
|
||||
struct caam_rsa_ctx {
|
||||
struct crypto_engine_ctx enginectx;
|
||||
struct caam_rsa_key key;
|
||||
struct device *dev;
|
||||
dma_addr_t padding_dma;
|
||||
@ -103,11 +106,16 @@ struct caam_rsa_ctx {
|
||||
* @src : input scatterlist (stripped of leading zeros)
|
||||
* @fixup_src : input scatterlist (that might be stripped of leading zeros)
|
||||
* @fixup_src_len : length of the fixup_src input scatterlist
|
||||
* @edesc : s/w-extended rsa descriptor
|
||||
* @akcipher_op_done : callback used when operation is done
|
||||
*/
|
||||
struct caam_rsa_req_ctx {
|
||||
struct scatterlist src[2];
|
||||
struct scatterlist *fixup_src;
|
||||
unsigned int fixup_src_len;
|
||||
struct rsa_edesc *edesc;
|
||||
void (*akcipher_op_done)(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context);
|
||||
};
|
||||
|
||||
/**
|
||||
@ -117,6 +125,7 @@ struct caam_rsa_req_ctx {
|
||||
* @mapped_src_nents: number of segments in input h/w link table
|
||||
* @mapped_dst_nents: number of segments in output h/w link table
|
||||
* @sec4_sg_bytes : length of h/w link table
|
||||
* @bklog : stored to determine if the request needs backlog
|
||||
* @sec4_sg_dma : dma address of h/w link table
|
||||
* @sec4_sg : pointer to h/w link table
|
||||
* @pdb : specific RSA Protocol Data Block (PDB)
|
||||
@ -128,6 +137,7 @@ struct rsa_edesc {
|
||||
int mapped_src_nents;
|
||||
int mapped_dst_nents;
|
||||
int sec4_sg_bytes;
|
||||
bool bklog;
|
||||
dma_addr_t sec4_sg_dma;
|
||||
struct sec4_sg_entry *sec4_sg;
|
||||
union {
|
||||
|
@ -7,35 +7,12 @@
|
||||
*
|
||||
* Based on caamalg.c crypto API driver.
|
||||
*
|
||||
* relationship between job descriptors to shared descriptors:
|
||||
*
|
||||
* --------------- --------------
|
||||
* | JobDesc #0 |-------------------->| ShareDesc |
|
||||
* | *(buffer 0) | |------------->| (generate) |
|
||||
* --------------- | | (move) |
|
||||
* | | (store) |
|
||||
* --------------- | --------------
|
||||
* | JobDesc #1 |------|
|
||||
* | *(buffer 1) |
|
||||
* ---------------
|
||||
*
|
||||
* A job desc looks like this:
|
||||
*
|
||||
* ---------------------
|
||||
* | Header |
|
||||
* | ShareDesc Pointer |
|
||||
* | SEQ_OUT_PTR |
|
||||
* | (output buffer) |
|
||||
* ---------------------
|
||||
*
|
||||
* The SharedDesc never changes, and each job descriptor points to one of two
|
||||
* buffers for each device, from which the data will be copied into the
|
||||
* requested destination
|
||||
*/
|
||||
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/kfifo.h>
|
||||
|
||||
#include "compat.h"
|
||||
|
||||
@ -45,278 +22,205 @@
|
||||
#include "jr.h"
|
||||
#include "error.h"
|
||||
|
||||
#define CAAM_RNG_MAX_FIFO_STORE_SIZE 16
|
||||
|
||||
/*
|
||||
* Maximum buffer size: maximum number of random, cache-aligned bytes that
|
||||
* will be generated and moved to seq out ptr (extlen not allowed)
|
||||
* Length of used descriptors, see caam_init_desc()
|
||||
*/
|
||||
#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \
|
||||
L1_CACHE_BYTES)
|
||||
|
||||
/* length of descriptors */
|
||||
#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ_MAX * 2)
|
||||
#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
|
||||
|
||||
/* Buffer, its dma address and lock */
|
||||
struct buf_data {
|
||||
u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
|
||||
dma_addr_t addr;
|
||||
struct completion filled;
|
||||
u32 hw_desc[DESC_JOB_O_LEN];
|
||||
#define BUF_NOT_EMPTY 0
|
||||
#define BUF_EMPTY 1
|
||||
#define BUF_PENDING 2 /* Empty, but with job pending --don't submit another */
|
||||
atomic_t empty;
|
||||
};
|
||||
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ + \
|
||||
CAAM_CMD_SZ + \
|
||||
CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)
|
||||
|
||||
/* rng per-device context */
|
||||
struct caam_rng_ctx {
|
||||
struct hwrng rng;
|
||||
struct device *jrdev;
|
||||
dma_addr_t sh_desc_dma;
|
||||
u32 sh_desc[DESC_RNG_LEN];
|
||||
unsigned int cur_buf_idx;
|
||||
int current_buf;
|
||||
struct buf_data bufs[2];
|
||||
struct device *ctrldev;
|
||||
void *desc_async;
|
||||
void *desc_sync;
|
||||
struct work_struct worker;
|
||||
struct kfifo fifo;
|
||||
};
|
||||
|
||||
static struct caam_rng_ctx *rng_ctx;
|
||||
struct caam_rng_job_ctx {
|
||||
struct completion *done;
|
||||
int *err;
|
||||
};
|
||||
|
||||
/*
|
||||
* Variable used to avoid double free of resources in case
|
||||
* algorithm registration was unsuccessful
|
||||
*/
|
||||
static bool init_done;
|
||||
|
||||
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
|
||||
static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
|
||||
{
|
||||
if (bd->addr)
|
||||
dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
|
||||
DMA_FROM_DEVICE);
|
||||
return (struct caam_rng_ctx *)r->priv;
|
||||
}
|
||||
|
||||
static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
|
||||
static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
void *context)
|
||||
{
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
|
||||
if (ctx->sh_desc_dma)
|
||||
dma_unmap_single(jrdev, ctx->sh_desc_dma,
|
||||
desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
|
||||
rng_unmap_buf(jrdev, &ctx->bufs[0]);
|
||||
rng_unmap_buf(jrdev, &ctx->bufs[1]);
|
||||
}
|
||||
|
||||
static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
|
||||
{
|
||||
struct buf_data *bd;
|
||||
|
||||
bd = container_of(desc, struct buf_data, hw_desc[0]);
|
||||
struct caam_rng_job_ctx *jctx = context;
|
||||
|
||||
if (err)
|
||||
caam_jr_strstatus(jrdev, err);
|
||||
*jctx->err = caam_jr_strstatus(jrdev, err);
|
||||
|
||||
atomic_set(&bd->empty, BUF_NOT_EMPTY);
|
||||
complete(&bd->filled);
|
||||
|
||||
/* Buffer refilled, invalidate cache */
|
||||
dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
|
||||
|
||||
print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
bd->buf, RN_BUF_SIZE, 1);
|
||||
complete(jctx->done);
|
||||
}
|
||||
|
||||
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
|
||||
static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
|
||||
{
|
||||
struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
u32 *desc = bd->hw_desc;
|
||||
int err;
|
||||
init_job_desc(desc, 0); /* + 1 cmd_sz */
|
||||
/* Generate random bytes: + 1 cmd_sz */
|
||||
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
|
||||
OP_ALG_PR_ON);
|
||||
/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
|
||||
append_fifo_store(desc, dst_dma,
|
||||
CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);
|
||||
|
||||
dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
|
||||
init_completion(&bd->filled);
|
||||
err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
|
||||
if (err)
|
||||
complete(&bd->filled); /* don't wait on failed job*/
|
||||
else
|
||||
atomic_inc(&bd->empty); /* note if pending */
|
||||
print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
|
||||
16, 4, desc, desc_bytes(desc), 1);
|
||||
|
||||
return err;
|
||||
return desc;
|
||||
}
|
||||
|
||||
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
|
||||
static int caam_rng_read_one(struct device *jrdev,
|
||||
void *dst, int len,
|
||||
void *desc,
|
||||
struct completion *done)
|
||||
{
|
||||
struct caam_rng_ctx *ctx = rng_ctx;
|
||||
struct buf_data *bd = &ctx->bufs[ctx->current_buf];
|
||||
int next_buf_idx, copied_idx;
|
||||
int err;
|
||||
dma_addr_t dst_dma;
|
||||
int err, ret = 0;
|
||||
struct caam_rng_job_ctx jctx = {
|
||||
.done = done,
|
||||
.err = &ret,
|
||||
};
|
||||
|
||||
if (atomic_read(&bd->empty)) {
|
||||
/* try to submit job if there wasn't one */
|
||||
if (atomic_read(&bd->empty) == BUF_EMPTY) {
|
||||
err = submit_job(ctx, 1);
|
||||
/* if can't submit job, can't even wait */
|
||||
if (err)
|
||||
return 0;
|
||||
}
|
||||
/* no immediate data, so exit if not waiting */
|
||||
if (!wait)
|
||||
return 0;
|
||||
len = CAAM_RNG_MAX_FIFO_STORE_SIZE;
|
||||
|
||||
/* waiting for pending job */
|
||||
if (atomic_read(&bd->empty))
|
||||
wait_for_completion(&bd->filled);
|
||||
}
|
||||
|
||||
next_buf_idx = ctx->cur_buf_idx + max;
|
||||
dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
|
||||
__func__, ctx->current_buf, ctx->cur_buf_idx);
|
||||
|
||||
/* if enough data in current buffer */
|
||||
if (next_buf_idx < RN_BUF_SIZE) {
|
||||
memcpy(data, bd->buf + ctx->cur_buf_idx, max);
|
||||
ctx->cur_buf_idx = next_buf_idx;
|
||||
return max;
|
||||
}
|
||||
|
||||
/* else, copy what's left... */
|
||||
copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
|
||||
memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
|
||||
ctx->cur_buf_idx = 0;
|
||||
atomic_set(&bd->empty, BUF_EMPTY);
|
||||
|
||||
/* ...refill... */
|
||||
submit_job(ctx, 1);
|
||||
|
||||
/* and use next buffer */
|
||||
ctx->current_buf = !ctx->current_buf;
|
||||
dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
|
||||
|
||||
/* since there already is some data read, don't wait */
|
||||
return copied_idx + caam_read(rng, data + copied_idx,
|
||||
max - copied_idx, false);
|
||||
}
|
||||
|
||||
static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
|
||||
{
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
u32 *desc = ctx->sh_desc;
|
||||
|
||||
init_sh_desc(desc, HDR_SHARE_SERIAL);
|
||||
|
||||
/* Generate random bytes */
|
||||
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
|
||||
|
||||
/* Store bytes */
|
||||
append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
|
||||
|
||||
ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
|
||||
dev_err(jrdev, "unable to map shared descriptor\n");
|
||||
dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(jrdev, dst_dma)) {
|
||||
dev_err(jrdev, "unable to map destination memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
desc, desc_bytes(desc), 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
|
||||
{
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
struct buf_data *bd = &ctx->bufs[buf_id];
|
||||
u32 *desc = bd->hw_desc;
|
||||
int sh_len = desc_len(ctx->sh_desc);
|
||||
|
||||
init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
|
||||
HDR_REVERSE);
|
||||
|
||||
bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(jrdev, bd->addr)) {
|
||||
dev_err(jrdev, "unable to map dst\n");
|
||||
return -ENOMEM;
|
||||
init_completion(done);
|
||||
err = caam_jr_enqueue(jrdev,
|
||||
caam_init_desc(desc, dst_dma),
|
||||
caam_rng_done, &jctx);
|
||||
if (err == -EINPROGRESS) {
|
||||
wait_for_completion(done);
|
||||
err = 0;
|
||||
}
|
||||
|
||||
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
|
||||
dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);
|
||||
|
||||
print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
desc, desc_bytes(desc), 1);
|
||||
return err ?: (ret ?: len);
|
||||
}
|
||||
|
||||
return 0;
|
||||
static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
|
||||
{
|
||||
struct scatterlist sg[1];
|
||||
struct completion done;
|
||||
int len, nents;
|
||||
|
||||
sg_init_table(sg, ARRAY_SIZE(sg));
|
||||
nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
|
||||
CAAM_RNG_MAX_FIFO_STORE_SIZE);
|
||||
if (!nents)
|
||||
return;
|
||||
|
||||
len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
|
||||
sg[0].length,
|
||||
ctx->desc_async,
|
||||
&done);
|
||||
if (len < 0)
|
||||
return;
|
||||
|
||||
kfifo_dma_in_finish(&ctx->fifo, len);
|
||||
}
|
||||
|
||||
static void caam_rng_worker(struct work_struct *work)
|
||||
{
|
||||
struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
|
||||
worker);
|
||||
caam_rng_fill_async(ctx);
|
||||
}
|
||||
|
||||
static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
int out;

if (wait) {
struct completion done;

return caam_rng_read_one(ctx->jrdev, dst, max,
ctx->desc_sync, &done);
}

out = kfifo_out(&ctx->fifo, dst, max);
if (kfifo_is_empty(&ctx->fifo))
schedule_work(&ctx->worker);

return out;
}
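
Put differently, the non-blocking path serves hw_random callers from a prefilled kfifo and defers the hardware round-trip to a worker. A reduced sketch of that pattern, with assumed names fifo_backed_read and refill_work, might look like:

#include <linux/kfifo.h>
#include <linux/workqueue.h>

/* Sketch only: serve from the FIFO, refill asynchronously once it runs dry. */
static int fifo_backed_read(struct kfifo *fifo, struct work_struct *refill_work,
			    void *dst, size_t max)
{
	unsigned int copied = kfifo_out(fifo, dst, max);	/* may return 0 */

	if (kfifo_is_empty(fifo))
		schedule_work(refill_work);	/* top the FIFO back up for next time */

	return copied;
}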
|
||||
|
||||
static void caam_cleanup(struct hwrng *rng)
|
||||
{
|
||||
int i;
|
||||
struct buf_data *bd;
|
||||
struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
bd = &rng_ctx->bufs[i];
|
||||
if (atomic_read(&bd->empty) == BUF_PENDING)
|
||||
wait_for_completion(&bd->filled);
|
||||
}
|
||||
|
||||
rng_unmap_ctx(rng_ctx);
|
||||
flush_work(&ctx->worker);
|
||||
caam_jr_free(ctx->jrdev);
|
||||
kfifo_free(&ctx->fifo);
|
||||
}
|
||||
|
||||
static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
|
||||
static int caam_init(struct hwrng *rng)
|
||||
{
|
||||
struct buf_data *bd = &ctx->bufs[buf_id];
|
||||
struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
|
||||
int err;
|
||||
|
||||
err = rng_create_job_desc(ctx, buf_id);
|
||||
if (err)
|
||||
return err;
|
||||
ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
|
||||
GFP_DMA | GFP_KERNEL);
|
||||
if (!ctx->desc_sync)
|
||||
return -ENOMEM;
|
||||
|
||||
atomic_set(&bd->empty, BUF_EMPTY);
|
||||
submit_job(ctx, buf_id == ctx->current_buf);
|
||||
wait_for_completion(&bd->filled);
|
||||
ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
|
||||
GFP_DMA | GFP_KERNEL);
|
||||
if (!ctx->desc_async)
|
||||
return -ENOMEM;
|
||||
|
||||
if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
|
||||
GFP_DMA | GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_WORK(&ctx->worker, caam_rng_worker);
|
||||
|
||||
ctx->jrdev = caam_jr_alloc();
|
||||
err = PTR_ERR_OR_ZERO(ctx->jrdev);
|
||||
if (err) {
|
||||
kfifo_free(&ctx->fifo);
|
||||
pr_err("Job Ring Device allocation for transform failed\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill async buffer to have early randomness data for
|
||||
* hw_random
|
||||
*/
|
||||
caam_rng_fill_async(ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
|
||||
int caam_rng_init(struct device *ctrldev);
|
||||
|
||||
void caam_rng_exit(struct device *ctrldev)
|
||||
{
|
||||
int err;
|
||||
|
||||
ctx->jrdev = jrdev;
|
||||
|
||||
err = rng_create_sh_desc(ctx);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ctx->current_buf = 0;
|
||||
ctx->cur_buf_idx = 0;
|
||||
|
||||
err = caam_init_buf(ctx, 0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return caam_init_buf(ctx, 1);
|
||||
}
|
||||
|
||||
static struct hwrng caam_rng = {
|
||||
.name = "rng-caam",
|
||||
.cleanup = caam_cleanup,
|
||||
.read = caam_read,
|
||||
};
|
||||
|
||||
void caam_rng_exit(void)
|
||||
{
|
||||
if (!init_done)
|
||||
return;
|
||||
|
||||
caam_jr_free(rng_ctx->jrdev);
|
||||
hwrng_unregister(&caam_rng);
|
||||
kfree(rng_ctx);
|
||||
devres_release_group(ctrldev, caam_rng_init);
|
||||
}
|
||||
|
||||
int caam_rng_init(struct device *ctrldev)
|
||||
{
|
||||
struct device *dev;
|
||||
struct caam_rng_ctx *ctx;
|
||||
u32 rng_inst;
|
||||
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
|
||||
int err;
|
||||
init_done = false;
|
||||
int ret;
|
||||
|
||||
/* Check for an instantiated RNG before registration */
|
||||
if (priv->era < 10)
|
||||
@ -328,31 +232,30 @@ int caam_rng_init(struct device *ctrldev)
|
||||
if (!rng_inst)
|
||||
return 0;
|
||||
|
||||
dev = caam_jr_alloc();
|
||||
if (IS_ERR(dev)) {
|
||||
pr_err("Job Ring Device allocation for transform failed\n");
|
||||
return PTR_ERR(dev);
|
||||
}
|
||||
rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
|
||||
if (!rng_ctx) {
|
||||
err = -ENOMEM;
|
||||
goto free_caam_alloc;
|
||||
}
|
||||
err = caam_init_rng(rng_ctx, dev);
|
||||
if (err)
|
||||
goto free_rng_ctx;
|
||||
if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
dev_info(dev, "registering rng-caam\n");
|
||||
ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
err = hwrng_register(&caam_rng);
|
||||
if (!err) {
|
||||
init_done = true;
|
||||
return err;
|
||||
ctx->ctrldev = ctrldev;
|
||||
|
||||
ctx->rng.name = "rng-caam";
|
||||
ctx->rng.init = caam_init;
|
||||
ctx->rng.cleanup = caam_cleanup;
|
||||
ctx->rng.read = caam_read;
|
||||
ctx->rng.priv = (unsigned long)ctx;
|
||||
ctx->rng.quality = 1024;
|
||||
|
||||
dev_info(ctrldev, "registering rng-caam\n");
|
||||
|
||||
ret = devm_hwrng_register(ctrldev, &ctx->rng);
|
||||
if (ret) {
|
||||
caam_rng_exit(ctrldev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
free_rng_ctx:
|
||||
kfree(rng_ctx);
|
||||
free_caam_alloc:
|
||||
caam_jr_free(dev);
|
||||
return err;
|
||||
devres_close_group(ctrldev, caam_rng_init);
|
||||
return 0;
|
||||
}
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/sys_soc.h>
|
||||
#include <linux/fsl/mc.h>
|
||||
|
||||
#include "compat.h"
|
||||
#include "regs.h"
|
||||
@ -36,7 +37,8 @@ static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
|
||||
init_job_desc(desc, 0);
|
||||
|
||||
op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
|
||||
(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
|
||||
(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT |
|
||||
OP_ALG_PR_ON;
|
||||
|
||||
/* INIT RNG in non-test mode */
|
||||
append_operation(desc, op_flags);
|
||||
@ -196,7 +198,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
|
||||
u32 *desc, status;
|
||||
int sh_idx, ret = 0;
|
||||
|
||||
desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
|
||||
desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
|
||||
if (!desc)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -273,17 +275,30 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
|
||||
int ret = 0, sh_idx;
|
||||
|
||||
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
|
||||
desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
|
||||
desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
|
||||
if (!desc)
|
||||
return -ENOMEM;
|
||||
|
||||
for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
|
||||
const u32 rdsta_if = RDSTA_IF0 << sh_idx;
|
||||
const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
|
||||
const u32 rdsta_mask = rdsta_if | rdsta_pr;
|
||||
/*
|
||||
* If the corresponding bit is set, this state handle
|
||||
* was initialized by somebody else, so it's left alone.
|
||||
*/
|
||||
if ((1 << sh_idx) & state_handle_mask)
|
||||
continue;
|
||||
if (rdsta_if & state_handle_mask) {
|
||||
if (rdsta_pr & state_handle_mask)
|
||||
continue;
|
||||
|
||||
dev_info(ctrldev,
|
||||
"RNG4 SH%d was previously instantiated without prediction resistance. Tearing it down\n",
|
||||
sh_idx);
|
||||
|
||||
ret = deinstantiate_rng(ctrldev, rdsta_if);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
/* Create the descriptor for instantiating RNG State Handle */
|
||||
build_instantiation_desc(desc, sh_idx, gen_sk);
|
||||
@ -303,9 +318,9 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
|
||||
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
|
||||
if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
|
||||
!(rdsta_val & (1 << sh_idx))) {
|
||||
(rdsta_val & rdsta_mask) != rdsta_mask) {
|
||||
ret = -EAGAIN;
|
||||
break;
|
||||
}
|
||||
@ -341,8 +356,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
|
||||
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
|
||||
r4tst = &ctrl->r4tst[0];
|
||||
|
||||
/* put RNG4 into program mode */
clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
/*
* Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to
* properly invalidate the entropy in the entropy register and
* force re-generation.
*/
clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC);

/*
* Performance-wise, it does not make sense to
@ -372,7 +391,8 @@ start_rng:
* select raw sampling in both entropy shifter
* and statistical checker; ; put RNG4 into run mode
*/
clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC,
RTMCTL_SAMP_MODE_RAW_ES_SC);
}
|
||||
|
||||
static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
|
||||
@ -559,6 +579,26 @@ static void caam_remove_debugfs(void *root)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_FSL_MC_BUS
|
||||
static bool check_version(struct fsl_mc_version *mc_version, u32 major,
|
||||
u32 minor, u32 revision)
|
||||
{
|
||||
if (mc_version->major > major)
|
||||
return true;
|
||||
|
||||
if (mc_version->major == major) {
|
||||
if (mc_version->minor > minor)
|
||||
return true;
|
||||
|
||||
if (mc_version->minor == minor &&
|
||||
mc_version->revision > revision)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Probe routine for CAAM top (controller) level */
|
||||
static int caam_probe(struct platform_device *pdev)
|
||||
{
|
||||
@ -577,6 +617,7 @@ static int caam_probe(struct platform_device *pdev)
|
||||
u8 rng_vid;
|
||||
int pg_size;
|
||||
int BLOCK_OFFSET = 0;
|
||||
bool pr_support = false;
|
||||
|
||||
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
|
||||
if (!ctrlpriv)
|
||||
@ -662,6 +703,21 @@ static int caam_probe(struct platform_device *pdev)
|
||||
|
||||
/* Get the IRQ of the controller (for security violations only) */
|
||||
ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
|
||||
np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
|
||||
ctrlpriv->mc_en = !!np;
|
||||
of_node_put(np);
|
||||
|
||||
#ifdef CONFIG_FSL_MC_BUS
|
||||
if (ctrlpriv->mc_en) {
|
||||
struct fsl_mc_version *mc_version;
|
||||
|
||||
mc_version = fsl_mc_get_version();
|
||||
if (mc_version)
|
||||
pr_support = check_version(mc_version, 10, 20, 0);
|
||||
else
|
||||
return -EPROBE_DEFER;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
|
||||
@ -669,10 +725,6 @@ static int caam_probe(struct platform_device *pdev)
|
||||
* In case of SoCs with Management Complex, MC f/w performs
|
||||
* the configuration.
|
||||
*/
|
||||
np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
|
||||
ctrlpriv->mc_en = !!np;
|
||||
of_node_put(np);
|
||||
|
||||
if (!ctrlpriv->mc_en)
|
||||
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
|
||||
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
|
||||
@ -779,7 +831,7 @@ static int caam_probe(struct platform_device *pdev)
|
||||
* already instantiated, do RNG instantiation
|
||||
* In case of SoCs with Management Complex, RNG is managed by MC f/w.
|
||||
*/
|
||||
if (!ctrlpriv->mc_en && rng_vid >= 4) {
|
||||
if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
|
||||
ctrlpriv->rng4_sh_init =
|
||||
rd_reg32(&ctrl->r4tst[0].rdsta);
|
||||
/*
|
||||
@ -789,11 +841,11 @@ static int caam_probe(struct platform_device *pdev)
|
||||
* to regenerate these keys before the next POR.
|
||||
*/
|
||||
gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
|
||||
ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
|
||||
ctrlpriv->rng4_sh_init &= RDSTA_MASK;
|
||||
do {
|
||||
int inst_handles =
|
||||
rd_reg32(&ctrl->r4tst[0].rdsta) &
|
||||
RDSTA_IFMASK;
|
||||
RDSTA_MASK;
|
||||
/*
|
||||
* If either SH were instantiated by somebody else
|
||||
* (e.g. u-boot) then it is assumed that the entropy
|
||||
@ -833,7 +885,7 @@ static int caam_probe(struct platform_device *pdev)
|
||||
* Set handles init'ed by this module as the complement of the
|
||||
* already initialized ones
|
||||
*/
|
||||
ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
|
||||
ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
|
||||
|
||||
/* Enable RDB bit so that RNG works faster */
|
||||
clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
|
||||
|
@ -1254,6 +1254,8 @@
|
||||
#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT)
|
||||
#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT)
|
||||
|
||||
#define OP_ALG_PR_ON BIT(1)
|
||||
|
||||
#define OP_ALG_DIR_SHIFT 0
|
||||
#define OP_ALG_DIR_MASK 1
|
||||
#define OP_ALG_DECRYPT 0
|
||||
|
@ -11,6 +11,7 @@
|
||||
#define INTERN_H
|
||||
|
||||
#include "ctrl.h"
|
||||
#include <crypto/engine.h>
|
||||
|
||||
/* Currently comes from Kconfig param as a ^2 (driver-required) */
|
||||
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
|
||||
@ -46,6 +47,7 @@ struct caam_drv_private_jr {
|
||||
struct caam_job_ring __iomem *rregs; /* JobR's register space */
|
||||
struct tasklet_struct irqtask;
|
||||
int irq; /* One per queue */
|
||||
bool hwrng;
|
||||
|
||||
/* Number of scatterlist crypt transforms active on the JobR */
|
||||
atomic_t tfm_count ____cacheline_aligned;
|
||||
@ -60,6 +62,7 @@ struct caam_drv_private_jr {
|
||||
int out_ring_read_index; /* Output index "tail" */
|
||||
int tail; /* entinfo (s/w ring) tail index */
|
||||
void *outring; /* Base of output ring, DMA-safe */
|
||||
struct crypto_engine *engine;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -161,7 +164,7 @@ static inline void caam_pkc_exit(void)
|
||||
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
|
||||
|
||||
int caam_rng_init(struct device *dev);
|
||||
void caam_rng_exit(void);
|
||||
void caam_rng_exit(struct device *dev);
|
||||
|
||||
#else
|
||||
|
||||
@ -170,9 +173,7 @@ static inline int caam_rng_init(struct device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void caam_rng_exit(void)
|
||||
{
|
||||
}
|
||||
static inline void caam_rng_exit(struct device *dev) {}
|
||||
|
||||
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
|
||||
|
||||
|
@ -27,7 +27,8 @@ static struct jr_driver_data driver_data;
|
||||
static DEFINE_MUTEX(algs_lock);
|
||||
static unsigned int active_devs;
|
||||
|
||||
static void register_algs(struct device *dev)
|
||||
static void register_algs(struct caam_drv_private_jr *jrpriv,
|
||||
struct device *dev)
|
||||
{
|
||||
mutex_lock(&algs_lock);
|
||||
|
||||
@ -37,7 +38,7 @@ static void register_algs(struct device *dev)
|
||||
caam_algapi_init(dev);
|
||||
caam_algapi_hash_init(dev);
|
||||
caam_pkc_init(dev);
|
||||
caam_rng_init(dev);
|
||||
jrpriv->hwrng = !caam_rng_init(dev);
|
||||
caam_qi_algapi_init(dev);
|
||||
|
||||
algs_unlock:
|
||||
@ -53,7 +54,6 @@ static void unregister_algs(void)
|
||||
|
||||
caam_qi_algapi_exit();
|
||||
|
||||
caam_rng_exit();
|
||||
caam_pkc_exit();
|
||||
caam_algapi_hash_exit();
|
||||
caam_algapi_exit();
|
||||
@ -62,6 +62,15 @@ algs_unlock:
|
||||
mutex_unlock(&algs_lock);
|
||||
}
|
||||
|
||||
static void caam_jr_crypto_engine_exit(void *data)
|
||||
{
|
||||
struct device *jrdev = data;
|
||||
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
|
||||
|
||||
/* Free the resources of crypto-engine */
|
||||
crypto_engine_exit(jrpriv->engine);
|
||||
}
|
||||
|
||||
static int caam_reset_hw_jr(struct device *dev)
|
||||
{
|
||||
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
|
||||
@ -126,6 +135,9 @@ static int caam_jr_remove(struct platform_device *pdev)
|
||||
jrdev = &pdev->dev;
|
||||
jrpriv = dev_get_drvdata(jrdev);
|
||||
|
||||
if (jrpriv->hwrng)
|
||||
caam_rng_exit(jrdev->parent);
|
||||
|
||||
/*
|
||||
* Return EBUSY if job ring already allocated.
|
||||
*/
|
||||
@ -324,8 +336,8 @@ void caam_jr_free(struct device *rdev)
|
||||
EXPORT_SYMBOL(caam_jr_free);
|
||||
|
||||
/**
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
* -EBUSY if the queue is full, -EIO if it cannot map the caller's
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
* if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
* descriptor.
* @dev: device of the job ring to be used. This device should have
* been assigned prior by caam_jr_register().
@ -377,7 +389,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
return -EBUSY;
return -ENOSPC;
}

head_entry = &jrp->entinfo[head];
@ -414,7 +426,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,

spin_unlock_bh(&jrp->inplock);

return 0;
return -EINPROGRESS;
}
EXPORT_SYMBOL(caam_jr_enqueue);
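
Under the new convention a successfully queued job is reported as -EINPROGRESS, a full ring as -ENOSPC and a mapping failure as -EIO; callers are updated accordingly (see the gen_split_key hunk below). A minimal caller sketch, with a hypothetical my_done_cb completion callback, is roughly:

/* Sketch only: how a caller adapts to the new caam_jr_enqueue() return codes. */
static void my_done_cb(struct device *jrdev, u32 *desc, u32 err, void *context);

static int my_submit(struct device *jrdev, u32 *desc, void *req)
{
	int ret = caam_jr_enqueue(jrdev, desc, my_done_cb, req);

	if (ret == -EINPROGRESS)
		return ret;	/* accepted; completion arrives in my_done_cb */

	/* -ENOSPC (ring full) or -EIO (mapping failure): clean up synchronously */
	return ret;
}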
|
||||
|
||||
@ -505,7 +517,7 @@ static int caam_jr_probe(struct platform_device *pdev)
|
||||
int error;
|
||||
|
||||
jrdev = &pdev->dev;
|
||||
jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
|
||||
jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
|
||||
if (!jrpriv)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -538,6 +550,25 @@ static int caam_jr_probe(struct platform_device *pdev)
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Initialize crypto engine */
|
||||
jrpriv->engine = crypto_engine_alloc_init(jrdev, false);
|
||||
if (!jrpriv->engine) {
|
||||
dev_err(jrdev, "Could not init crypto-engine\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit,
|
||||
jrdev);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/* Start crypto engine */
|
||||
error = crypto_engine_start(jrpriv->engine);
|
||||
if (error) {
|
||||
dev_err(jrdev, "Could not start crypto-engine\n");
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Identify the interrupt */
|
||||
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
|
||||
if (!jrpriv->irq) {
|
||||
@ -562,7 +593,7 @@ static int caam_jr_probe(struct platform_device *pdev)
|
||||
|
||||
atomic_set(&jrpriv->tfm_count, 0);
|
||||
|
||||
register_algs(jrdev->parent);
|
||||
register_algs(jrpriv, jrdev->parent);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -108,7 +108,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
init_completion(&result.completion);

ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
if (ret == -EINPROGRESS) {
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
|
||||
|
@@ -4,7 +4,7 @@
* Queue Interface backend functionality
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017, 2019 NXP
* Copyright 2016-2017, 2019-2020 NXP
*/

#include <linux/cpumask.h>
@@ -124,8 +124,10 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)

do {
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
if (likely(!ret))
if (likely(!ret)) {
refcount_inc(&req->drv_ctx->refcnt);
return 0;
}

if (ret != -EBUSY)
break;
@@ -148,11 +150,6 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,

fd = &msg->ern.fd;

if (qm_fd_get_format(fd) != qm_fd_compound) {
dev_err(qidev, "Non-compound FD from CAAM\n");
return;
}

drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (!drv_req) {
dev_err(qidev,
@@ -160,6 +157,13 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
return;
}

refcount_dec(&drv_req->drv_ctx->refcnt);

if (qm_fd_get_format(fd) != qm_fd_compound) {
dev_err(qidev, "Non-compound FD from CAAM\n");
return;
}

dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

@@ -287,9 +291,10 @@ empty_fq:
return ret;
}

static int empty_caam_fq(struct qman_fq *fq)
static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
{
int ret;
int retries = 10;
struct qm_mcr_queryfq_np np;

/* Wait till the older CAAM FQ get empty */
@@ -304,11 +309,18 @@ static int empty_caam_fq(struct qman_fq *fq)
msleep(20);
} while (1);

/*
* Give extra time for pending jobs from this FQ in holding tanks
* to get processed
*/
msleep(20);
/* Wait until pending jobs from this FQ are processed by CAAM */
do {
if (refcount_read(&drv_ctx->refcnt) == 1)
break;

msleep(20);
} while (--retries);

if (!retries)
dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n",
refcount_read(&drv_ctx->refcnt), fq->fqid);

return 0;
}

@@ -340,7 +352,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
drv_ctx->req_fq = new_fq;

/* Empty and remove the older FQ */
ret = empty_caam_fq(old_fq);
ret = empty_caam_fq(old_fq, drv_ctx);
if (ret) {
dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

@@ -453,6 +465,9 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
return ERR_PTR(-ENOMEM);
}

/* init reference counter used to track references to request FQ */
refcount_set(&drv_ctx->refcnt, 1);

drv_ctx->qidev = qidev;
return drv_ctx;
}
@@ -571,6 +586,16 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
return qman_cb_dqrr_stop;

fd = &dqrr->fd;

drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (unlikely(!drv_req)) {
dev_err(qidev,
"Can't find original request for caam response\n");
return qman_cb_dqrr_consume;
}

refcount_dec(&drv_req->drv_ctx->refcnt);

status = be32_to_cpu(fd->status);
if (unlikely(status)) {
u32 ssrc = status & JRSTA_SSRC_MASK;
@@ -588,13 +613,6 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
return qman_cb_dqrr_consume;
}

drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (unlikely(!drv_req)) {
dev_err(qidev,
"Can't find original request for caam response\n");
return qman_cb_dqrr_consume;
}

dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

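A brief, hedged sketch of the reference-counting scheme these qi.c hunks introduce: the context holds one base reference set at init, every frame enqueued to the to-CAAM FQ takes another, every response or error-notification callback drops one, and tear-down polls until only the base reference remains. Only the refcount_* helpers are real kernel API; the helper below and its retry budget are illustrative, and the real code only warns instead of returning an error.

/* Illustrative drain helper, not the driver code itself. */
#include <linux/refcount.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int demo_drain_refs(refcount_t *refcnt, unsigned int retries)
{
	/* Wait until only the base reference (count == 1) is left. */
	while (refcount_read(refcnt) > 1) {
		if (!retries--)
			return -ETIMEDOUT;
		msleep(20);
	}
	return 0;
}
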
@@ -3,7 +3,7 @@
* Public definitions for the CAAM/QI (Queue Interface) backend.
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017 NXP
* Copyright 2016-2017, 2020 NXP
*/

#ifndef __QI_H__
@@ -52,6 +52,7 @@ enum optype {
* @context_a: shared descriptor dma address
* @req_fq: to-CAAM request frame queue
* @rsp_fq: from-CAAM response frame queue
* @refcnt: reference counter incremented for each frame enqueued in to-CAAM FQ
* @cpu: cpu on which to receive CAAM response
* @op_type: operation type
* @qidev: device pointer for CAAM/QI backend
@@ -62,6 +63,7 @@ struct caam_drv_ctx {
dma_addr_t context_a;
struct qman_fq *req_fq;
struct qman_fq *rsp_fq;
refcount_t refcnt;
int cpu;
enum optype op_type;
struct device *qidev;

@@ -487,7 +487,8 @@ struct rngtst {

/* RNG4 TRNG test registers */
struct rng4tst {
#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
#define RTMCTL_ACC BIT(5) /* TRNG access mode */
#define RTMCTL_PRGM BIT(16) /* 1 -> program mode, 0 -> run mode */
#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in
both entropy shifter and
statistical checker */
@@ -523,9 +524,11 @@ struct rng4tst {
u32 rsvd1[40];
#define RDSTA_SKVT 0x80000000
#define RDSTA_SKVN 0x40000000
#define RDSTA_PR0 BIT(4)
#define RDSTA_PR1 BIT(5)
#define RDSTA_IF0 0x00000001
#define RDSTA_IF1 0x00000002
#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
#define RDSTA_MASK (RDSTA_PR1 | RDSTA_PR0 | RDSTA_IF1 | RDSTA_IF0)
u32 rdsta;
u32 rsvd2[15];
};

@@ -71,7 +71,7 @@ struct ucode {
char version[VERSION_LEN - 1];
__be32 code_size;
u8 raz[12];
u64 code[0];
u64 code[];
};

/**

@@ -215,6 +215,9 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);

sp_free_psp_irq(sp, psp);

if (sp->clear_psp_master_device)
sp->clear_psp_master_device(sp);
}

void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,

@@ -283,11 +283,11 @@ static int sev_get_platform_state(int *state, int *error)
return rc;
}

static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
int state, rc;

if (!capable(CAP_SYS_ADMIN))
if (!writable)
return -EPERM;

/*
@@ -331,12 +331,12 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
return ret;
}

static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
int rc;

if (!capable(CAP_SYS_ADMIN))
if (!writable)
return -EPERM;

if (sev->state == SEV_STATE_UNINIT) {
@@ -348,7 +348,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
return __sev_do_cmd_locked(cmd, NULL, &argp->error);
}

static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
@@ -356,7 +356,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
void *blob = NULL;
int ret;

if (!capable(CAP_SYS_ADMIN))
if (!writable)
return -EPERM;

if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
@@ -539,7 +539,7 @@ fw_err:
return ret;
}

static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
@@ -547,7 +547,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
void *pek_blob, *oca_blob;
int ret;

if (!capable(CAP_SYS_ADMIN))
if (!writable)
return -EPERM;

if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
@@ -698,7 +698,7 @@ static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
return ret;
}

static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pdh_cert_export input;
@@ -708,7 +708,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)

/* If platform is not in INIT state then transition it to INIT. */
if (sev->state != SEV_STATE_INIT) {
if (!capable(CAP_SYS_ADMIN))
if (!writable)
return -EPERM;

ret = __sev_platform_init_locked(&argp->error);
@@ -801,6 +801,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
void __user *argp = (void __user *)arg;
struct sev_issue_cmd input;
int ret = -EFAULT;
bool writable = file->f_mode & FMODE_WRITE;

if (!psp_master || !psp_master->sev_data)
return -ENODEV;
@@ -819,25 +820,25 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
switch (input.cmd) {

case SEV_FACTORY_RESET:
ret = sev_ioctl_do_reset(&input);
ret = sev_ioctl_do_reset(&input, writable);
break;
case SEV_PLATFORM_STATUS:
ret = sev_ioctl_do_platform_status(&input);
break;
case SEV_PEK_GEN:
ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable);
break;
case SEV_PDH_GEN:
ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable);
break;
case SEV_PEK_CSR:
ret = sev_ioctl_do_pek_csr(&input);
ret = sev_ioctl_do_pek_csr(&input, writable);
break;
case SEV_PEK_CERT_IMPORT:
ret = sev_ioctl_do_pek_import(&input);
ret = sev_ioctl_do_pek_import(&input, writable);
break;
case SEV_PDH_CERT_EXPORT:
ret = sev_ioctl_do_pdh_export(&input);
ret = sev_ioctl_do_pdh_export(&input, writable);
break;
case SEV_GET_ID:
pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
@@ -896,9 +897,9 @@ EXPORT_SYMBOL_GPL(sev_guest_df_flush);

static void sev_exit(struct kref *ref)
{
struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);

misc_deregister(&misc_dev->misc);
kfree(misc_dev);
misc_dev = NULL;
}

static int sev_misc_init(struct sev_device *sev)
@@ -916,7 +917,7 @@ static int sev_misc_init(struct sev_device *sev)
if (!misc_dev) {
struct miscdevice *misc;

misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
if (!misc_dev)
return -ENOMEM;

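A hedged userspace sketch of what the writable check above changes for /dev/sev consumers: a read-only open is now enough for query-style commands, while state-changing commands need the file opened for writing instead of CAP_SYS_ADMIN. The structures and SEV_ISSUE_CMD come from <linux/psp-sev.h>; everything else here is illustrative.

/* Illustrative only: query SEV platform status through a read-only fd. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-sev.h>

int main(void)
{
	struct sev_user_data_status status = { 0 };
	struct sev_issue_cmd cmd = {
		.cmd  = SEV_PLATFORM_STATUS,
		.data = (__u64)(unsigned long)&status,
	};
	/* O_RDONLY suffices here; SEV_FACTORY_RESET, SEV_PEK_GEN and friends
	 * would now require an O_RDWR open rather than CAP_SYS_ADMIN. */
	int fd = open("/dev/sev", O_RDONLY);

	if (fd < 0 || ioctl(fd, SEV_ISSUE_CMD, &cmd) < 0) {
		perror("sev");
		return 1;
	}
	printf("SEV API %u.%u, state %u\n", status.api_major,
	       status.api_minor, status.state);
	close(fd);
	return 0;
}
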
@@ -90,6 +90,7 @@ struct sp_device {
/* get and set master device */
struct sp_device*(*get_psp_master_device)(void);
void (*set_psp_master_device)(struct sp_device *);
void (*clear_psp_master_device)(struct sp_device *);

bool irq_registered;
bool use_tasklet;

@@ -146,6 +146,14 @@ static struct sp_device *psp_get_master(void)
return sp_dev_master;
}

static void psp_clear_master(struct sp_device *sp)
{
if (sp == sp_dev_master) {
sp_dev_master = NULL;
dev_dbg(sp->dev, "Cleared sp_dev_master\n");
}
}

static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct sp_device *sp;
@@ -206,6 +214,7 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
sp->set_psp_master_device = psp_set_master;
sp->get_psp_master_device = psp_get_master;
sp->clear_psp_master_device = psp_clear_master;

ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {

@ -6,8 +6,9 @@
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/authenc.h>
|
||||
#include <crypto/internal/des.h>
|
||||
#include <crypto/gcm.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <crypto/internal/des.h>
|
||||
#include "cc_driver.h"
|
||||
#include "cc_buffer_mgr.h"
|
||||
#include "cc_aead.h"
|
||||
@ -26,7 +27,7 @@
|
||||
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
|
||||
|
||||
struct cc_aead_handle {
|
||||
cc_sram_addr_t sram_workspace_addr;
|
||||
u32 sram_workspace_addr;
|
||||
struct list_head aead_list;
|
||||
};
|
||||
|
||||
@ -60,11 +61,6 @@ struct cc_aead_ctx {
|
||||
enum drv_hash_mode auth_mode;
|
||||
};
|
||||
|
||||
static inline bool valid_assoclen(struct aead_request *req)
|
||||
{
|
||||
return ((req->assoclen == 16) || (req->assoclen == 20));
|
||||
}
|
||||
|
||||
static void cc_aead_exit(struct crypto_aead *tfm)
|
||||
{
|
||||
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
@ -417,7 +413,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
|
||||
dma_addr_t key_dma_addr = 0;
|
||||
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct device *dev = drvdata_to_dev(ctx->drvdata);
|
||||
u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
|
||||
u32 larval_addr;
|
||||
struct cc_crypto_req cc_req = {};
|
||||
unsigned int blocksize;
|
||||
unsigned int digestsize;
|
||||
@ -448,8 +444,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
|
||||
if (!key)
|
||||
return -ENOMEM;
|
||||
|
||||
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
|
||||
DMA_TO_DEVICE);
|
||||
key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, key_dma_addr)) {
|
||||
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
|
||||
key, keylen);
|
||||
@ -460,6 +455,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
|
||||
/* Load hash initial state */
|
||||
hw_desc_init(&desc[idx]);
|
||||
set_cipher_mode(&desc[idx], hashmode);
|
||||
larval_addr = cc_larval_digest_addr(ctx->drvdata,
|
||||
ctx->auth_mode);
|
||||
set_din_sram(&desc[idx], larval_addr, digestsize);
|
||||
set_flow_mode(&desc[idx], S_DIN_to_HASH);
|
||||
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
|
||||
@ -796,7 +793,7 @@ static void cc_proc_authen_desc(struct aead_request *areq,
|
||||
* assoc. + iv + data -compact in one table
|
||||
* if assoclen is ZERO only IV perform
|
||||
*/
|
||||
cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
|
||||
u32 mlli_addr = areq_ctx->assoc.sram_addr;
|
||||
u32 mlli_nents = areq_ctx->assoc.mlli_nents;
|
||||
|
||||
if (areq_ctx->is_single_pass) {
|
||||
@ -1170,7 +1167,7 @@ static void cc_mlli_to_sram(struct aead_request *req,
|
||||
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
|
||||
!req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
|
||||
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
|
||||
(unsigned int)ctx->drvdata->mlli_sram_addr,
|
||||
ctx->drvdata->mlli_sram_addr,
|
||||
req_ctx->mlli_params.mlli_len);
|
||||
/* Copy MLLI table host-to-sram */
|
||||
hw_desc_init(&desc[*seq_size]);
|
||||
@ -1222,7 +1219,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
|
||||
req_ctx->is_single_pass);
|
||||
|
||||
if (req_ctx->is_single_pass) {
|
||||
/**
|
||||
/*
|
||||
* Single-pass flow
|
||||
*/
|
||||
cc_set_hmac_desc(req, desc, seq_size);
|
||||
@ -1234,7 +1231,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Double-pass flow
|
||||
* Fallback for unsupported single-pass modes,
|
||||
* i.e. using assoc. data of non-word-multiple
|
||||
@ -1275,7 +1272,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
|
||||
req_ctx->is_single_pass);
|
||||
|
||||
if (req_ctx->is_single_pass) {
|
||||
/**
|
||||
/*
|
||||
* Single-pass flow
|
||||
*/
|
||||
cc_set_xcbc_desc(req, desc, seq_size);
|
||||
@ -1286,7 +1283,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Double-pass flow
|
||||
* Fallback for unsupported single-pass modes,
|
||||
* i.e. using assoc. data of non-word-multiple
|
||||
@ -1611,7 +1608,6 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
|
||||
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
|
||||
CCM_BLOCK_IV_SIZE);
|
||||
req->iv = areq_ctx->ctr_iv;
|
||||
areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
|
||||
}
|
||||
|
||||
static void cc_set_ghash_desc(struct aead_request *req,
|
||||
@ -1799,12 +1795,6 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
|
||||
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
|
||||
unsigned int cipher_flow_mode;
|
||||
|
||||
if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
|
||||
cipher_flow_mode = AES_and_HASH;
|
||||
} else { /* Encrypt */
|
||||
cipher_flow_mode = AES_to_HASH_and_DOUT;
|
||||
}
|
||||
|
||||
//in RFC4543 no data to encrypt. just copy data from src to dest.
|
||||
if (req_ctx->plaintext_authenticate_only) {
|
||||
cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
|
||||
@ -1816,6 +1806,12 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
|
||||
cipher_flow_mode = AES_and_HASH;
|
||||
} else { /* Encrypt */
|
||||
cipher_flow_mode = AES_to_HASH_and_DOUT;
|
||||
}
|
||||
|
||||
// for gcm and rfc4106.
|
||||
cc_set_ghash_desc(req, desc, seq_size);
|
||||
/* process(ghash) assoc data */
|
||||
@ -1870,8 +1866,7 @@ static int config_gcm_context(struct aead_request *req)
|
||||
*/
|
||||
__be64 temp64;
|
||||
|
||||
temp64 = cpu_to_be64((req_ctx->assoclen +
|
||||
GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
|
||||
temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
|
||||
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
|
||||
temp64 = 0;
|
||||
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
|
||||
@ -1891,7 +1886,6 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
|
||||
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
|
||||
GCM_BLOCK_RFC4_IV_SIZE);
|
||||
req->iv = areq_ctx->ctr_iv;
|
||||
areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
|
||||
}
|
||||
|
||||
static int cc_proc_aead(struct aead_request *req,
|
||||
@ -1921,8 +1915,8 @@ static int cc_proc_aead(struct aead_request *req,
|
||||
}
|
||||
|
||||
/* Setup request structure */
|
||||
cc_req.user_cb = (void *)cc_aead_complete;
|
||||
cc_req.user_arg = (void *)req;
|
||||
cc_req.user_cb = cc_aead_complete;
|
||||
cc_req.user_arg = req;
|
||||
|
||||
/* Setup request context */
|
||||
areq_ctx->gen_ctx.op_type = direct;
|
||||
@ -1989,7 +1983,6 @@ static int cc_proc_aead(struct aead_request *req,
|
||||
/* Load MLLI tables to SRAM if necessary */
|
||||
cc_mlli_to_sram(req, desc, &seq_len);
|
||||
|
||||
/*TODO: move seq len by reference */
|
||||
switch (ctx->auth_mode) {
|
||||
case DRV_HASH_SHA1:
|
||||
case DRV_HASH_SHA256:
|
||||
@ -2034,9 +2027,6 @@ static int cc_aead_encrypt(struct aead_request *req)
|
||||
/* No generated IV required */
|
||||
areq_ctx->backup_iv = req->iv;
|
||||
areq_ctx->assoclen = req->assoclen;
|
||||
areq_ctx->is_gcm4543 = false;
|
||||
|
||||
areq_ctx->plaintext_authenticate_only = false;
|
||||
|
||||
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
|
||||
if (rc != -EINPROGRESS && rc != -EBUSY)
|
||||
@ -2050,22 +2040,17 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
|
||||
/* Very similar to cc_aead_encrypt() above. */
|
||||
|
||||
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct device *dev = drvdata_to_dev(ctx->drvdata);
|
||||
int rc = -EINVAL;
|
||||
int rc;
|
||||
|
||||
if (!valid_assoclen(req)) {
|
||||
dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
|
||||
rc = crypto_ipsec_check_assoclen(req->assoclen);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(areq_ctx, 0, sizeof(*areq_ctx));
|
||||
|
||||
/* No generated IV required */
|
||||
areq_ctx->backup_iv = req->iv;
|
||||
areq_ctx->assoclen = req->assoclen;
|
||||
areq_ctx->is_gcm4543 = true;
|
||||
areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
|
||||
|
||||
cc_proc_rfc4309_ccm(req);
|
||||
|
||||
@ -2086,9 +2071,6 @@ static int cc_aead_decrypt(struct aead_request *req)
|
||||
/* No generated IV required */
|
||||
areq_ctx->backup_iv = req->iv;
|
||||
areq_ctx->assoclen = req->assoclen;
|
||||
areq_ctx->is_gcm4543 = false;
|
||||
|
||||
areq_ctx->plaintext_authenticate_only = false;
|
||||
|
||||
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
|
||||
if (rc != -EINPROGRESS && rc != -EBUSY)
|
||||
@ -2099,24 +2081,19 @@ static int cc_aead_decrypt(struct aead_request *req)
|
||||
|
||||
static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct device *dev = drvdata_to_dev(ctx->drvdata);
|
||||
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
|
||||
int rc = -EINVAL;
|
||||
int rc;
|
||||
|
||||
if (!valid_assoclen(req)) {
|
||||
dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
|
||||
rc = crypto_ipsec_check_assoclen(req->assoclen);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(areq_ctx, 0, sizeof(*areq_ctx));
|
||||
|
||||
/* No generated IV required */
|
||||
areq_ctx->backup_iv = req->iv;
|
||||
areq_ctx->assoclen = req->assoclen;
|
||||
areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
|
||||
|
||||
areq_ctx->is_gcm4543 = true;
|
||||
cc_proc_rfc4309_ccm(req);
|
||||
|
||||
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
|
||||
@ -2216,28 +2193,20 @@ static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
|
||||
|
||||
static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
|
||||
{
|
||||
/* Very similar to cc_aead_encrypt() above. */
|
||||
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct device *dev = drvdata_to_dev(ctx->drvdata);
|
||||
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
|
||||
int rc = -EINVAL;
|
||||
int rc;
|
||||
|
||||
if (!valid_assoclen(req)) {
|
||||
dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
|
||||
rc = crypto_ipsec_check_assoclen(req->assoclen);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(areq_ctx, 0, sizeof(*areq_ctx));
|
||||
|
||||
/* No generated IV required */
|
||||
areq_ctx->backup_iv = req->iv;
|
||||
areq_ctx->assoclen = req->assoclen;
|
||||
areq_ctx->plaintext_authenticate_only = false;
|
||||
areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
|
||||
|
||||
cc_proc_rfc4_gcm(req);
|
||||
areq_ctx->is_gcm4543 = true;
|
||||
|
||||
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
|
||||
if (rc != -EINPROGRESS && rc != -EBUSY)
|
||||
@ -2248,17 +2217,12 @@ out:
|
||||
|
||||
static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
|
||||
{
|
||||
/* Very similar to cc_aead_encrypt() above. */
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct device *dev = drvdata_to_dev(ctx->drvdata);
|
||||
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
|
||||
int rc = -EINVAL;
|
||||
int rc;
|
||||
|
||||
if (!valid_assoclen(req)) {
|
||||
dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
|
||||
rc = crypto_ipsec_check_assoclen(req->assoclen);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(areq_ctx, 0, sizeof(*areq_ctx));
|
||||
|
||||
@ -2270,7 +2234,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
|
||||
areq_ctx->assoclen = req->assoclen;
|
||||
|
||||
cc_proc_rfc4_gcm(req);
|
||||
areq_ctx->is_gcm4543 = true;
|
||||
|
||||
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
|
||||
if (rc != -EINPROGRESS && rc != -EBUSY)
|
||||
@ -2281,28 +2244,20 @@ out:
|
||||
|
||||
static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
|
||||
{
|
||||
/* Very similar to cc_aead_decrypt() above. */
|
||||
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct device *dev = drvdata_to_dev(ctx->drvdata);
|
||||
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
|
||||
int rc = -EINVAL;
|
||||
int rc;
|
||||
|
||||
if (!valid_assoclen(req)) {
|
||||
dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
|
||||
rc = crypto_ipsec_check_assoclen(req->assoclen);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(areq_ctx, 0, sizeof(*areq_ctx));
|
||||
|
||||
/* No generated IV required */
|
||||
areq_ctx->backup_iv = req->iv;
|
||||
areq_ctx->assoclen = req->assoclen;
|
||||
areq_ctx->plaintext_authenticate_only = false;
|
||||
areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
|
||||
|
||||
cc_proc_rfc4_gcm(req);
|
||||
areq_ctx->is_gcm4543 = true;
|
||||
|
||||
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
|
||||
if (rc != -EINPROGRESS && rc != -EBUSY)
|
||||
@ -2313,17 +2268,12 @@ out:
|
||||
|
||||
static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
|
||||
{
|
||||
/* Very similar to cc_aead_decrypt() above. */
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct device *dev = drvdata_to_dev(ctx->drvdata);
|
||||
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
|
||||
int rc = -EINVAL;
|
||||
int rc;
|
||||
|
||||
if (!valid_assoclen(req)) {
|
||||
dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
|
||||
rc = crypto_ipsec_check_assoclen(req->assoclen);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(areq_ctx, 0, sizeof(*areq_ctx));
|
||||
|
||||
@ -2335,7 +2285,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
|
||||
areq_ctx->assoclen = req->assoclen;
|
||||
|
||||
cc_proc_rfc4_gcm(req);
|
||||
areq_ctx->is_gcm4543 = true;
|
||||
|
||||
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
|
||||
if (rc != -EINPROGRESS && rc != -EBUSY)
|
||||
@ -2614,7 +2563,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
|
||||
struct cc_crypto_alg *t_alg;
|
||||
struct aead_alg *alg;
|
||||
|
||||
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
|
||||
t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
|
||||
if (!t_alg)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
@ -2628,6 +2577,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
|
||||
|
||||
alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
|
||||
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
|
||||
alg->base.cra_blocksize = tmpl->blocksize;
|
||||
alg->init = cc_aead_init;
|
||||
alg->exit = cc_aead_exit;
|
||||
|
||||
@ -2643,19 +2593,12 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
|
||||
int cc_aead_free(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct cc_crypto_alg *t_alg, *n;
|
||||
struct cc_aead_handle *aead_handle =
|
||||
(struct cc_aead_handle *)drvdata->aead_handle;
|
||||
struct cc_aead_handle *aead_handle = drvdata->aead_handle;
|
||||
|
||||
if (aead_handle) {
|
||||
/* Remove registered algs */
|
||||
list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
|
||||
entry) {
|
||||
crypto_unregister_aead(&t_alg->aead_alg);
|
||||
list_del(&t_alg->entry);
|
||||
kfree(t_alg);
|
||||
}
|
||||
kfree(aead_handle);
|
||||
drvdata->aead_handle = NULL;
|
||||
/* Remove registered algs */
|
||||
list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
|
||||
crypto_unregister_aead(&t_alg->aead_alg);
|
||||
list_del(&t_alg->entry);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -2669,7 +2612,7 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
|
||||
int alg;
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
|
||||
aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
|
||||
aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL);
|
||||
if (!aead_handle) {
|
||||
rc = -ENOMEM;
|
||||
goto fail0;
|
||||
@ -2682,7 +2625,6 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
|
||||
MAX_HMAC_DIGEST_SIZE);
|
||||
|
||||
if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
|
||||
dev_err(dev, "SRAM pool exhausted\n");
|
||||
rc = -ENOMEM;
|
||||
goto fail1;
|
||||
}
|
||||
@ -2705,18 +2647,16 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
|
||||
if (rc) {
|
||||
dev_err(dev, "%s alg registration failed\n",
|
||||
t_alg->aead_alg.base.cra_driver_name);
|
||||
goto fail2;
|
||||
} else {
|
||||
list_add_tail(&t_alg->entry, &aead_handle->aead_list);
|
||||
dev_dbg(dev, "Registered %s\n",
|
||||
t_alg->aead_alg.base.cra_driver_name);
|
||||
goto fail1;
|
||||
}
|
||||
|
||||
list_add_tail(&t_alg->entry, &aead_handle->aead_list);
|
||||
dev_dbg(dev, "Registered %s\n",
|
||||
t_alg->aead_alg.base.cra_driver_name);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail2:
|
||||
kfree(t_alg);
|
||||
fail1:
|
||||
cc_aead_free(drvdata);
|
||||
fail0:
|
||||
|
@ -66,7 +66,7 @@ struct aead_req_ctx {
|
||||
/* used to prevent cache coherence problem */
|
||||
u8 backup_mac[MAX_MAC_SIZE];
|
||||
u8 *backup_iv; /* store orig iv */
|
||||
u32 assoclen; /* internal assoclen */
|
||||
u32 assoclen; /* size of AAD buffer to authenticate */
|
||||
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
|
||||
/* buffer for internal ccm configurations */
|
||||
dma_addr_t ccm_iv0_dma_addr;
|
||||
@ -79,7 +79,6 @@ struct aead_req_ctx {
|
||||
dma_addr_t gcm_iv_inc2_dma_addr;
|
||||
dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
|
||||
dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
|
||||
bool is_gcm4543;
|
||||
|
||||
u8 *icv_virt_addr; /* Virt. address of ICV */
|
||||
struct async_gen_req_ctx gen_ctx;
|
||||
|
@ -13,16 +13,6 @@
|
||||
#include "cc_hash.h"
|
||||
#include "cc_aead.h"
|
||||
|
||||
enum dma_buffer_type {
|
||||
DMA_NULL_TYPE = -1,
|
||||
DMA_SGL_TYPE = 1,
|
||||
DMA_BUFF_TYPE = 2,
|
||||
};
|
||||
|
||||
struct buff_mgr_handle {
|
||||
struct dma_pool *mlli_buffs_pool;
|
||||
};
|
||||
|
||||
union buffer_array_entry {
|
||||
struct scatterlist *sgl;
|
||||
dma_addr_t buffer_dma;
|
||||
@ -34,7 +24,6 @@ struct buffer_array {
|
||||
unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
|
||||
int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
|
||||
int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
|
||||
enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
|
||||
bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
|
||||
u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
|
||||
};
|
||||
@ -64,11 +53,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
|
||||
enum cc_sg_cpy_direct dir)
|
||||
{
|
||||
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
u32 skip = areq_ctx->assoclen + req->cryptlen;
|
||||
|
||||
if (areq_ctx->is_gcm4543)
|
||||
skip += crypto_aead_ivsize(tfm);
|
||||
u32 skip = req->assoclen + req->cryptlen;
|
||||
|
||||
cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
|
||||
(skip - areq_ctx->req_authsize), skip, dir);
|
||||
@ -77,9 +62,13 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
|
||||
/**
|
||||
* cc_get_sgl_nents() - Get scatterlist number of entries.
|
||||
*
|
||||
* @dev: Device object
|
||||
* @sg_list: SG list
|
||||
* @nbytes: [IN] Total SGL data bytes.
|
||||
* @lbytes: [OUT] Returns the amount of bytes at the last entry
|
||||
*
|
||||
* Return:
|
||||
* Number of entries in the scatterlist
|
||||
*/
|
||||
static unsigned int cc_get_sgl_nents(struct device *dev,
|
||||
struct scatterlist *sg_list,
|
||||
@ -87,6 +76,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
|
||||
{
|
||||
unsigned int nents = 0;
|
||||
|
||||
*lbytes = 0;
|
||||
|
||||
while (nbytes && sg_list) {
|
||||
nents++;
|
||||
/* get the number of bytes in the last entry */
|
||||
@ -95,6 +86,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
|
||||
nbytes : sg_list->length;
|
||||
sg_list = sg_next(sg_list);
|
||||
}
|
||||
|
||||
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
|
||||
return nents;
|
||||
}
|
||||
@ -103,11 +95,13 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
|
||||
* cc_copy_sg_portion() - Copy scatter list data,
|
||||
* from to_skip to end, to dest and vice versa
|
||||
*
|
||||
* @dest:
|
||||
* @sg:
|
||||
* @to_skip:
|
||||
* @end:
|
||||
* @direct:
|
||||
* @dev: Device object
|
||||
* @dest: Buffer to copy to/from
|
||||
* @sg: SG list
|
||||
* @to_skip: Number of bytes to skip before copying
|
||||
* @end: Offset of last byte to copy
|
||||
* @direct: Transfer direction (true == from SG list to buffer, false == from
|
||||
* buffer to SG list)
|
||||
*/
|
||||
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
|
||||
u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
|
||||
@ -115,7 +109,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
|
||||
u32 nents;
|
||||
|
||||
nents = sg_nents_for_len(sg, end);
|
||||
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
|
||||
sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
|
||||
(direct == CC_SG_TO_BUF));
|
||||
}
|
||||
|
||||
@ -204,21 +198,15 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
|
||||
goto build_mlli_exit;
|
||||
}
|
||||
/* Point to start of MLLI */
|
||||
mlli_p = (u32 *)mlli_params->mlli_virt_addr;
|
||||
mlli_p = mlli_params->mlli_virt_addr;
|
||||
/* go over all SG's and link it to one MLLI table */
|
||||
for (i = 0; i < sg_data->num_of_buffers; i++) {
|
||||
union buffer_array_entry *entry = &sg_data->entry[i];
|
||||
u32 tot_len = sg_data->total_data_len[i];
|
||||
u32 offset = sg_data->offset[i];
|
||||
|
||||
if (sg_data->type[i] == DMA_SGL_TYPE)
|
||||
rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
|
||||
offset, &total_nents,
|
||||
&mlli_p);
|
||||
else /*DMA_BUFF_TYPE*/
|
||||
rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
|
||||
tot_len, &total_nents,
|
||||
&mlli_p);
|
||||
rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
|
||||
&total_nents, &mlli_p);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -244,27 +232,6 @@ build_mlli_exit:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void cc_add_buffer_entry(struct device *dev,
|
||||
struct buffer_array *sgl_data,
|
||||
dma_addr_t buffer_dma, unsigned int buffer_len,
|
||||
bool is_last_entry, u32 *mlli_nents)
|
||||
{
|
||||
unsigned int index = sgl_data->num_of_buffers;
|
||||
|
||||
dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
|
||||
index, &buffer_dma, buffer_len, is_last_entry);
|
||||
sgl_data->nents[index] = 1;
|
||||
sgl_data->entry[index].buffer_dma = buffer_dma;
|
||||
sgl_data->offset[index] = 0;
|
||||
sgl_data->total_data_len[index] = buffer_len;
|
||||
sgl_data->type[index] = DMA_BUFF_TYPE;
|
||||
sgl_data->is_last[index] = is_last_entry;
|
||||
sgl_data->mlli_nents[index] = mlli_nents;
|
||||
if (sgl_data->mlli_nents[index])
|
||||
*sgl_data->mlli_nents[index] = 0;
|
||||
sgl_data->num_of_buffers++;
|
||||
}
|
||||
|
||||
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
|
||||
unsigned int nents, struct scatterlist *sgl,
|
||||
unsigned int data_len, unsigned int data_offset,
|
||||
@ -278,7 +245,6 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
|
||||
sgl_data->entry[index].sgl = sgl;
|
||||
sgl_data->offset[index] = data_offset;
|
||||
sgl_data->total_data_len[index] = data_len;
|
||||
sgl_data->type[index] = DMA_SGL_TYPE;
|
||||
sgl_data->is_last[index] = is_last_table;
|
||||
sgl_data->mlli_nents[index] = mlli_nents;
|
||||
if (sgl_data->mlli_nents[index])
|
||||
@ -290,37 +256,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
unsigned int nbytes, int direction, u32 *nents,
|
||||
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
|
||||
{
|
||||
if (sg_is_last(sg)) {
|
||||
/* One entry only case -set to DLLI */
|
||||
if (dma_map_sg(dev, sg, 1, direction) != 1) {
|
||||
dev_err(dev, "dma_map_sg() single buffer failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
|
||||
&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
|
||||
sg->offset, sg->length);
|
||||
*lbytes = nbytes;
|
||||
*nents = 1;
|
||||
*mapped_nents = 1;
|
||||
} else { /*sg_is_last*/
|
||||
*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
|
||||
if (*nents > max_sg_nents) {
|
||||
*nents = 0;
|
||||
dev_err(dev, "Too many fragments. current %d max %d\n",
|
||||
*nents, max_sg_nents);
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* In case of mmu the number of mapped nents might
|
||||
* be changed from the original sgl nents
|
||||
*/
|
||||
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
|
||||
if (*mapped_nents == 0) {
|
||||
*nents = 0;
|
||||
dev_err(dev, "dma_map_sg() sg buffer failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
int ret = 0;
|
||||
|
||||
*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
|
||||
if (*nents > max_sg_nents) {
|
||||
*nents = 0;
|
||||
dev_err(dev, "Too many fragments. current %d max %d\n",
|
||||
*nents, max_sg_nents);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = dma_map_sg(dev, sg, *nents, direction);
|
||||
if (dma_mapping_error(dev, ret)) {
|
||||
*nents = 0;
|
||||
dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
*mapped_nents = ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -411,7 +365,6 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
|
||||
{
|
||||
struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
|
||||
struct mlli_params *mlli_params = &req_ctx->mlli_params;
|
||||
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
struct buffer_array sg_data;
|
||||
u32 dummy = 0;
|
||||
@ -424,10 +377,9 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
|
||||
|
||||
/* Map IV buffer */
|
||||
if (ivsize) {
|
||||
dump_byte_array("iv", (u8 *)info, ivsize);
|
||||
dump_byte_array("iv", info, ivsize);
|
||||
req_ctx->gen_ctx.iv_dma_addr =
|
||||
dma_map_single(dev, (void *)info,
|
||||
ivsize, DMA_BIDIRECTIONAL);
|
||||
dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
|
||||
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
|
||||
ivsize, info);
|
||||
@ -476,7 +428,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
|
||||
}
|
||||
|
||||
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
|
||||
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
|
||||
mlli_params->curr_pool = drvdata->mlli_buffs_pool;
|
||||
rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
|
||||
if (rc)
|
||||
goto cipher_exit;
|
||||
@ -555,11 +507,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
|
||||
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
|
||||
areq_ctx->assoclen, req->cryptlen);
|
||||
|
||||
dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
|
||||
dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (req->src != req->dst) {
|
||||
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
|
||||
sg_virt(req->dst));
|
||||
dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
|
||||
dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
|
||||
DMA_BIDIRECTIONAL);
|
||||
}
|
||||
if (drvdata->coherent &&
|
||||
@ -614,18 +567,6 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
|
||||
|
||||
dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
|
||||
hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
|
||||
// TODO: what about CTR?? ask Ron
|
||||
if (do_chain && areq_ctx->plaintext_authenticate_only) {
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
|
||||
unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
|
||||
/* Chain to given list */
|
||||
cc_add_buffer_entry(dev, sg_data,
|
||||
(areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
|
||||
iv_size_to_authenc, is_last,
|
||||
&areq_ctx->assoc.mlli_nents);
|
||||
areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
|
||||
}
|
||||
|
||||
chain_iv_exit:
|
||||
return rc;
|
||||
@ -639,13 +580,8 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
|
||||
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
|
||||
int rc = 0;
|
||||
int mapped_nents = 0;
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
unsigned int size_of_assoc = areq_ctx->assoclen;
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
|
||||
if (areq_ctx->is_gcm4543)
|
||||
size_of_assoc += crypto_aead_ivsize(tfm);
|
||||
|
||||
if (!sg_data) {
|
||||
rc = -EINVAL;
|
||||
goto chain_assoc_exit;
|
||||
@ -661,7 +597,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
|
||||
goto chain_assoc_exit;
|
||||
}
|
||||
|
||||
mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
|
||||
mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
|
||||
if (mapped_nents < 0)
|
||||
return mapped_nents;
|
||||
|
||||
@ -854,16 +790,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
|
||||
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
|
||||
u32 offset = 0;
|
||||
/* non-inplace mode */
|
||||
unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
unsigned int size_for_map = req->assoclen + req->cryptlen;
|
||||
u32 sg_index = 0;
|
||||
bool is_gcm4543 = areq_ctx->is_gcm4543;
|
||||
u32 size_to_skip = areq_ctx->assoclen;
|
||||
u32 size_to_skip = req->assoclen;
|
||||
struct scatterlist *sgl;
|
||||
|
||||
if (is_gcm4543)
|
||||
size_to_skip += crypto_aead_ivsize(tfm);
|
||||
|
||||
offset = size_to_skip;
|
||||
|
||||
if (!sg_data)
|
||||
@ -872,16 +803,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
|
||||
areq_ctx->src_sgl = req->src;
|
||||
areq_ctx->dst_sgl = req->dst;
|
||||
|
||||
if (is_gcm4543)
|
||||
size_for_map += crypto_aead_ivsize(tfm);
|
||||
|
||||
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
|
||||
authsize : 0;
|
||||
src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
|
||||
&src_last_bytes);
|
||||
sg_index = areq_ctx->src_sgl->length;
|
||||
//check where the data starts
|
||||
while (sg_index <= size_to_skip) {
|
||||
while (src_mapped_nents && (sg_index <= size_to_skip)) {
|
||||
src_mapped_nents--;
|
||||
offset -= areq_ctx->src_sgl->length;
|
||||
sgl = sg_next(areq_ctx->src_sgl);
|
||||
@ -901,14 +829,15 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
|
||||
areq_ctx->src_offset = offset;
|
||||
|
||||
if (req->src != req->dst) {
|
||||
size_for_map = areq_ctx->assoclen + req->cryptlen;
|
||||
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
|
||||
authsize : 0;
|
||||
if (is_gcm4543)
|
||||
size_for_map += crypto_aead_ivsize(tfm);
|
||||
size_for_map = req->assoclen + req->cryptlen;
|
||||
|
||||
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
|
||||
size_for_map += authsize;
|
||||
else
|
||||
size_for_map -= authsize;
|
||||
|
||||
rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
|
||||
&areq_ctx->dst.nents,
|
||||
&areq_ctx->dst.mapped_nents,
|
||||
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
|
||||
&dst_mapped_nents);
|
||||
if (rc)
|
||||
@ -921,7 +850,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
|
||||
offset = size_to_skip;
|
||||
|
||||
//check where the data starts
|
||||
while (sg_index <= size_to_skip) {
|
||||
while (dst_mapped_nents && sg_index <= size_to_skip) {
|
||||
dst_mapped_nents--;
|
||||
offset -= areq_ctx->dst_sgl->length;
|
||||
sgl = sg_next(areq_ctx->dst_sgl);
|
||||
@ -1012,14 +941,11 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
struct buffer_array sg_data;
|
||||
unsigned int authsize = areq_ctx->req_authsize;
|
||||
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
|
||||
int rc = 0;
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
bool is_gcm4543 = areq_ctx->is_gcm4543;
|
||||
dma_addr_t dma_addr;
|
||||
u32 mapped_nents = 0;
|
||||
u32 dummy = 0; /*used for the assoc data fragments */
|
||||
u32 size_to_map = 0;
|
||||
u32 size_to_map;
|
||||
gfp_t flags = cc_gfp_flags(&req->base);
|
||||
|
||||
mlli_params->curr_pool = NULL;
|
||||
@ -1116,14 +1042,15 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
|
||||
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
|
||||
}
|
||||
|
||||
size_to_map = req->cryptlen + areq_ctx->assoclen;
|
||||
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
|
||||
size_to_map = req->cryptlen + req->assoclen;
|
||||
/* If we do in-place encryption, we also need the auth tag */
|
||||
if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
|
||||
(req->src == req->dst)) {
|
||||
size_to_map += authsize;
|
||||
}
|
||||
|
||||
if (is_gcm4543)
|
||||
size_to_map += crypto_aead_ivsize(tfm);
|
||||
rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
|
||||
&areq_ctx->src.nents,
|
||||
&areq_ctx->src.mapped_nents,
|
||||
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
|
||||
LLI_MAX_NUM_OF_DATA_ENTRIES),
|
||||
&dummy, &mapped_nents);
|
||||
@ -1183,7 +1110,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
|
||||
*/
|
||||
if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
|
||||
areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
|
||||
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
|
||||
mlli_params->curr_pool = drvdata->mlli_buffs_pool;
|
||||
rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
|
||||
if (rc)
|
||||
goto aead_map_failure;
|
||||
@ -1211,7 +1138,6 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
|
||||
u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
|
||||
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
|
||||
struct buffer_array sg_data;
|
||||
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
|
||||
int rc = 0;
|
||||
u32 dummy = 0;
|
||||
u32 mapped_nents = 0;
|
||||
@ -1229,7 +1155,6 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*TODO: copy data in case that buffer is enough for operation */
|
||||
/* map the previous buffer */
|
||||
if (*curr_buff_cnt) {
|
||||
rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
|
||||
@ -1258,7 +1183,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
|
||||
|
||||
/*build mlli */
|
||||
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
|
||||
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
|
||||
mlli_params->curr_pool = drvdata->mlli_buffs_pool;
|
||||
/* add the src data to the sg_data */
|
||||
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
|
||||
0, true, &areq_ctx->mlli_nents);
|
||||
@ -1296,7 +1221,6 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
|
||||
unsigned int update_data_len;
|
||||
u32 total_in_len = nbytes + *curr_buff_cnt;
|
||||
struct buffer_array sg_data;
|
||||
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
|
||||
unsigned int swap_index = 0;
|
||||
int rc = 0;
|
||||
u32 dummy = 0;
|
||||
@ -1371,7 +1295,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
|
||||
}
|
||||
|
||||
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
|
||||
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
|
||||
mlli_params->curr_pool = drvdata->mlli_buffs_pool;
|
||||
/* add the src data to the sg_data */
|
||||
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
|
||||
(update_data_len - *curr_buff_cnt), 0, true,
|
||||
@ -1438,39 +1362,22 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
|
||||
|
||||
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct buff_mgr_handle *buff_mgr_handle;
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
|
||||
buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
|
||||
if (!buff_mgr_handle)
|
||||
return -ENOMEM;
|
||||
|
||||
drvdata->buff_mgr_handle = buff_mgr_handle;
|
||||
|
||||
buff_mgr_handle->mlli_buffs_pool =
|
||||
drvdata->mlli_buffs_pool =
|
||||
dma_pool_create("dx_single_mlli_tables", dev,
|
||||
MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
|
||||
LLI_ENTRY_BYTE_SIZE,
|
||||
MLLI_TABLE_MIN_ALIGNMENT, 0);
|
||||
|
||||
if (!buff_mgr_handle->mlli_buffs_pool)
|
||||
goto error;
|
||||
if (!drvdata->mlli_buffs_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
cc_buffer_mgr_fini(drvdata);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
|
||||
|
||||
if (buff_mgr_handle) {
|
||||
dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
|
||||
kfree(drvdata->buff_mgr_handle);
|
||||
drvdata->buff_mgr_handle = NULL;
|
||||
}
|
||||
dma_pool_destroy(drvdata->mlli_buffs_pool);
|
||||
return 0;
|
||||
}
|
||||
|
@ -24,14 +24,15 @@ enum cc_sg_cpy_direct {
|
||||
};
|
||||
|
||||
struct cc_mlli {
|
||||
cc_sram_addr_t sram_addr;
|
||||
u32 sram_addr;
|
||||
unsigned int mapped_nents;
|
||||
unsigned int nents; //sg nents
|
||||
unsigned int mlli_nents; //mlli nents might be different than the above
|
||||
};
|
||||
|
||||
struct mlli_params {
|
||||
struct dma_pool *curr_pool;
|
||||
u8 *mlli_virt_addr;
|
||||
void *mlli_virt_addr;
|
||||
dma_addr_t mlli_dma_addr;
|
||||
u32 mlli_len;
|
||||
};
|
||||
|
@ -20,10 +20,6 @@
|
||||
|
||||
#define template_skcipher template_u.skcipher
|
||||
|
||||
struct cc_cipher_handle {
|
||||
struct list_head alg_list;
|
||||
};
|
||||
|
||||
struct cc_user_key_info {
|
||||
u8 *key;
|
||||
dma_addr_t key_dma_addr;
|
||||
@ -184,7 +180,7 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
|
||||
ctx_p->user.key);
|
||||
|
||||
/* Map key buffer */
|
||||
ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
|
||||
ctx_p->user.key_dma_addr = dma_map_single(dev, ctx_p->user.key,
|
||||
max_key_buf_size,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
|
||||
@ -284,7 +280,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
|
||||
|
||||
dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n",
|
||||
ctx_p, crypto_tfm_alg_name(tfm), keylen);
|
||||
dump_byte_array("key", (u8 *)key, keylen);
|
||||
dump_byte_array("key", key, keylen);
|
||||
|
||||
/* STAT_PHASE_0: Init and sanity checks */
|
||||
|
||||
@ -387,7 +383,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
|
||||
|
||||
dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
|
||||
ctx_p, crypto_tfm_alg_name(tfm), keylen);
|
||||
dump_byte_array("key", (u8 *)key, keylen);
|
||||
dump_byte_array("key", key, keylen);
|
||||
|
||||
/* STAT_PHASE_0: Init and sanity checks */
|
||||
|
||||
@ -533,14 +529,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
|
||||
int flow_mode = ctx_p->flow_mode;
|
||||
int direction = req_ctx->gen_ctx.op_type;
|
||||
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
|
||||
unsigned int du_size = nbytes;
|
||||
|
||||
struct cc_crypto_alg *cc_alg =
|
||||
container_of(tfm->__crt_alg, struct cc_crypto_alg,
|
||||
skcipher_alg.base);
|
||||
|
||||
if (cc_alg->data_unit)
|
||||
du_size = cc_alg->data_unit;
|
||||
|
||||
switch (cipher_mode) {
|
||||
case DRV_CIPHER_ECB:
|
||||
@ -753,7 +741,7 @@ static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
|
||||
dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
|
||||
&req_ctx->mlli_params.mlli_dma_addr,
|
||||
req_ctx->mlli_params.mlli_len,
|
||||
(unsigned int)ctx_p->drvdata->mlli_sram_addr);
|
||||
ctx_p->drvdata->mlli_sram_addr);
|
||||
hw_desc_init(&desc[*seq_size]);
|
||||
set_din_type(&desc[*seq_size], DMA_DLLI,
|
||||
req_ctx->mlli_params.mlli_dma_addr,
|
||||
@ -801,16 +789,16 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
|
||||
req_ctx->in_mlli_nents, NS_BIT);
|
||||
if (req_ctx->out_nents == 0) {
|
||||
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
|
||||
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
|
||||
(unsigned int)ctx_p->drvdata->mlli_sram_addr);
|
||||
ctx_p->drvdata->mlli_sram_addr,
|
||||
ctx_p->drvdata->mlli_sram_addr);
|
||||
set_dout_mlli(&desc[*seq_size],
|
||||
ctx_p->drvdata->mlli_sram_addr,
|
||||
req_ctx->in_mlli_nents, NS_BIT,
|
||||
(!last_desc ? 0 : 1));
|
||||
} else {
|
||||
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
|
||||
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
|
||||
(unsigned int)ctx_p->drvdata->mlli_sram_addr +
|
||||
ctx_p->drvdata->mlli_sram_addr,
|
||||
ctx_p->drvdata->mlli_sram_addr +
|
||||
(u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
|
||||
set_dout_mlli(&desc[*seq_size],
|
||||
(ctx_p->drvdata->mlli_sram_addr +
|
||||
@ -871,7 +859,6 @@ static int cc_cipher_process(struct skcipher_request *req,
|
||||
|
||||
/* STAT_PHASE_0: Init and sanity checks */
|
||||
|
||||
/* TODO: check data length according to mode */
|
||||
if (validate_data_size(ctx_p, nbytes)) {
|
||||
dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
|
||||
rc = -EINVAL;
|
||||
@ -893,8 +880,8 @@ static int cc_cipher_process(struct skcipher_request *req,
|
||||
}
|
||||
|
||||
/* Setup request structure */
|
||||
cc_req.user_cb = (void *)cc_cipher_complete;
|
||||
cc_req.user_arg = (void *)req;
|
||||
cc_req.user_cb = cc_cipher_complete;
|
||||
cc_req.user_arg = req;
|
||||
|
||||
/* Setup CPP operation details */
|
||||
if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
|
||||
@ -1228,6 +1215,10 @@ static const struct cc_alg_template skcipher_algs[] = {
|
||||
.sec_func = true,
|
||||
},
|
||||
{
|
||||
/* See https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg40576.html
|
||||
* for the reason why this differs from the generic
|
||||
* implementation.
|
||||
*/
|
||||
.name = "xts(aes)",
|
||||
.driver_name = "xts-aes-ccree",
|
||||
.blocksize = 1,
|
||||
@ -1423,7 +1414,7 @@ static const struct cc_alg_template skcipher_algs[] = {
|
||||
{
|
||||
.name = "ofb(aes)",
|
||||
.driver_name = "ofb-aes-ccree",
|
||||
.blocksize = AES_BLOCK_SIZE,
|
||||
.blocksize = 1,
|
||||
.template_skcipher = {
|
||||
.setkey = cc_cipher_setkey,
|
||||
.encrypt = cc_cipher_encrypt,
|
||||
@ -1576,7 +1567,7 @@ static const struct cc_alg_template skcipher_algs[] = {
|
||||
{
|
||||
.name = "ctr(sm4)",
|
||||
.driver_name = "ctr-sm4-ccree",
|
||||
.blocksize = SM4_BLOCK_SIZE,
|
||||
.blocksize = 1,
|
||||
.template_skcipher = {
|
||||
.setkey = cc_cipher_setkey,
|
||||
.encrypt = cc_cipher_encrypt,
|
||||
@ -1634,7 +1625,7 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
|
||||
struct cc_crypto_alg *t_alg;
|
||||
struct skcipher_alg *alg;
|
||||
|
||||
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
|
||||
t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
|
||||
if (!t_alg)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
@ -1665,36 +1656,23 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
|
||||
int cc_cipher_free(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct cc_crypto_alg *t_alg, *n;
|
||||
struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle;
|
||||
|
||||
if (cipher_handle) {
|
||||
/* Remove registered algs */
|
||||
list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list,
|
||||
entry) {
|
||||
crypto_unregister_skcipher(&t_alg->skcipher_alg);
|
||||
list_del(&t_alg->entry);
|
||||
kfree(t_alg);
|
||||
}
|
||||
kfree(cipher_handle);
|
||||
drvdata->cipher_handle = NULL;
|
||||
/* Remove registered algs */
|
||||
list_for_each_entry_safe(t_alg, n, &drvdata->alg_list, entry) {
|
||||
crypto_unregister_skcipher(&t_alg->skcipher_alg);
|
||||
list_del(&t_alg->entry);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cc_cipher_alloc(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct cc_cipher_handle *cipher_handle;
|
||||
struct cc_crypto_alg *t_alg;
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
int rc = -ENOMEM;
|
||||
int alg;
|
||||
|
||||
cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL);
|
||||
if (!cipher_handle)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&cipher_handle->alg_list);
|
||||
drvdata->cipher_handle = cipher_handle;
|
||||
INIT_LIST_HEAD(&drvdata->alg_list);
|
||||
|
||||
/* Linux crypto */
|
||||
dev_dbg(dev, "Number of algorithms = %zu\n",
|
||||
@ -1723,14 +1701,12 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
|
||||
if (rc) {
|
||||
dev_err(dev, "%s alg registration failed\n",
|
||||
t_alg->skcipher_alg.base.cra_driver_name);
|
||||
kfree(t_alg);
|
||||
goto fail0;
|
||||
} else {
|
||||
list_add_tail(&t_alg->entry,
|
||||
&cipher_handle->alg_list);
|
||||
dev_dbg(dev, "Registered %s\n",
|
||||
t_alg->skcipher_alg.base.cra_driver_name);
|
||||
}
|
||||
|
||||
list_add_tail(&t_alg->entry, &drvdata->alg_list);
|
||||
dev_dbg(dev, "Registered %s\n",
|
||||
t_alg->skcipher_alg.base.cra_driver_name);
|
||||
}
|
||||
return 0;
|
||||
|
||||
|
@ -8,10 +8,6 @@
|
||||
#include "cc_crypto_ctx.h"
|
||||
#include "cc_debugfs.h"
|
||||
|
||||
struct cc_debugfs_ctx {
|
||||
struct dentry *dir;
|
||||
};
|
||||
|
||||
#define CC_DEBUG_REG(_X) { \
|
||||
.name = __stringify(_X),\
|
||||
.offset = CC_REG(_X) \
|
||||
@ -67,13 +63,8 @@ void __exit cc_debugfs_global_fini(void)
|
||||
int cc_debugfs_init(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
struct cc_debugfs_ctx *ctx;
|
||||
struct debugfs_regset32 *regset, *verset;
|
||||
|
||||
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
|
||||
if (!regset)
|
||||
return -ENOMEM;
|
||||
@ -81,16 +72,18 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
|
||||
regset->regs = debug_regs;
|
||||
regset->nregs = ARRAY_SIZE(debug_regs);
|
||||
regset->base = drvdata->cc_base;
|
||||
regset->dev = dev;
|
||||
|
||||
ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
|
||||
drvdata->dir = debugfs_create_dir(drvdata->plat_dev->name,
|
||||
cc_debugfs_dir);
|
||||
|
||||
debugfs_create_regset32("regs", 0400, ctx->dir, regset);
|
||||
debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
|
||||
debugfs_create_regset32("regs", 0400, drvdata->dir, regset);
|
||||
debugfs_create_bool("coherent", 0400, drvdata->dir, &drvdata->coherent);
|
||||
|
||||
verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
|
||||
/* Failing here is not important enough to fail the module load */
|
||||
if (!verset)
|
||||
goto out;
|
||||
return 0;
|
||||
|
||||
if (drvdata->hw_rev <= CC_HW_REV_712) {
|
||||
ver_sig_regs[0].offset = drvdata->sig_offset;
|
||||
@ -102,17 +95,13 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
|
||||
verset->nregs = ARRAY_SIZE(pid_cid_regs);
|
||||
}
|
||||
verset->base = drvdata->cc_base;
|
||||
verset->dev = dev;
|
||||
|
||||
debugfs_create_regset32("version", 0400, ctx->dir, verset);
|
||||
|
||||
out:
|
||||
drvdata->debugfs = ctx;
|
||||
debugfs_create_regset32("version", 0400, drvdata->dir, verset);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void cc_debugfs_fini(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
|
||||
|
||||
debugfs_remove_recursive(ctx->dir);
|
||||
debugfs_remove_recursive(drvdata->dir);
|
||||
}
|
||||
|
@ -14,6 +14,8 @@
|
||||
#include <linux/of.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#include "cc_driver.h"
|
||||
#include "cc_request_mgr.h"
|
||||
@ -134,7 +136,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
|
||||
|
||||
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
|
||||
/* if driver suspended return, probably shared interrupt */
|
||||
if (cc_pm_is_dev_suspended(dev))
|
||||
if (pm_runtime_suspended(dev))
|
||||
return IRQ_NONE;
|
||||
|
||||
/* read the interrupt status */
|
||||
@ -269,7 +271,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
u32 val, hw_rev_pidr, sig_cidr;
|
||||
u64 dma_mask;
|
||||
const struct cc_hw_data *hw_rev;
|
||||
const struct of_device_id *dev_id;
|
||||
struct clk *clk;
|
||||
int irq;
|
||||
int rc = 0;
|
||||
@ -278,11 +279,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
if (!new_drvdata)
|
||||
return -ENOMEM;
|
||||
|
||||
dev_id = of_match_node(arm_ccree_dev_of_match, np);
|
||||
if (!dev_id)
|
||||
return -ENODEV;
|
||||
|
||||
hw_rev = (struct cc_hw_data *)dev_id->data;
|
||||
hw_rev = of_device_get_match_data(dev);
|
||||
new_drvdata->hw_rev_name = hw_rev->name;
|
||||
new_drvdata->hw_rev = hw_rev->rev;
|
||||
new_drvdata->std_bodies = hw_rev->std_bodies;
|
||||
@@ -302,22 +299,12 @@ static int init_cc_resources(struct platform_device *plat_dev)
platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;

clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk))
switch (PTR_ERR(clk)) {
/* Clock is optional so this might be fine */
case -ENOENT:
break;

/* Clock not available, let's try again soon */
case -EPROBE_DEFER:
return -EPROBE_DEFER;

default:
dev_err(dev, "Error getting clock: %ld\n",
PTR_ERR(clk));
return PTR_ERR(clk);
}
clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
dev_err(dev, "Error getting clock: %pe\n", clk);
return PTR_ERR(clk);
}
new_drvdata->clk = clk;

new_drvdata->coherent = of_dma_is_coherent(np);
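The replacement hunk above relies on devm_clk_get_optional(), which hands back a NULL (no-op) clock when the device tree describes none, so the -ENOENT special case disappears. A minimal probe-side sketch of that pattern, assuming a hypothetical platform device; only devm_clk_get_optional(), clk_prepare_enable() and the usual dev_err()/PTR_ERR() helpers are used:

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Sketch only: the devm_clk_get_optional() probe pattern used in the hunk above. */
static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;
	int rc;

	clk = devm_clk_get_optional(dev, NULL);	/* NULL clk if the DT has no clock */
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) != -EPROBE_DEFER)
			dev_err(dev, "Error getting clock: %pe\n", clk);
		return PTR_ERR(clk);
	}

	rc = clk_prepare_enable(clk);		/* a NULL clock is a harmless no-op */
	if (rc)
		return rc;

	/* ... device setup, clock stays enabled ... */
	return 0;
}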
@ -344,13 +331,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
|
||||
init_completion(&new_drvdata->hw_queue_avail);
|
||||
|
||||
if (!plat_dev->dev.dma_mask)
|
||||
plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
|
||||
if (!dev->dma_mask)
|
||||
dev->dma_mask = &dev->coherent_dma_mask;
|
||||
|
||||
dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
|
||||
while (dma_mask > 0x7fffffffUL) {
|
||||
if (dma_supported(&plat_dev->dev, dma_mask)) {
|
||||
rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
|
||||
if (dma_supported(dev, dma_mask)) {
|
||||
rc = dma_set_coherent_mask(dev, dma_mask);
|
||||
if (!rc)
|
||||
break;
|
||||
}
|
||||
@ -362,7 +349,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = cc_clk_on(new_drvdata);
|
||||
rc = clk_prepare_enable(new_drvdata->clk);
|
||||
if (rc) {
|
||||
dev_err(dev, "Failed to enable clock");
|
||||
return rc;
|
||||
@@ -370,7 +357,17 @@ static int init_cc_resources(struct platform_device *plat_dev)

new_drvdata->sec_disabled = cc_sec_disable;

/* wait for Crytpcell reset completion */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
rc = pm_runtime_get_sync(dev);
if (rc < 0) {
dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc);
goto post_pm_err;
}

/* Wait for Cryptocell reset completion */
if (!cc_wait_for_reset_completion(new_drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
}
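The hunk above replaces the driver's home-grown power handling with runtime PM: the device is marked active, a reference is taken for the rest of probe, and autosuspend is only allowed once setup succeeds. A generic sketch of that bring-up order, with a hypothetical delay value standing in for CC_SUSPEND_TIMEOUT; all pm_runtime_* calls are the standard kernel API:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Sketch: runtime-PM bring-up during probe, mirroring the sequence above. */
static int example_pm_bringup(struct device *dev)
{
	int rc;

	pm_runtime_set_autosuspend_delay(dev, 1000);	/* illustrative delay, in ms */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);	/* hardware is already powered at probe time */
	pm_runtime_enable(dev);

	rc = pm_runtime_get_sync(dev);	/* keep the device up while probing */
	if (rc < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		return rc;
	}

	/* ... hardware initialization ... */

	pm_runtime_put(dev);		/* drop the probe reference, allow autosuspend */
	return 0;
}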
@ -382,7 +379,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
|
||||
val, hw_rev->sig);
|
||||
rc = -EINVAL;
|
||||
goto post_clk_err;
|
||||
goto post_pm_err;
|
||||
}
|
||||
sig_cidr = val;
|
||||
hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
|
||||
@ -393,7 +390,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
|
||||
val, hw_rev->pidr_0124);
|
||||
rc = -EINVAL;
|
||||
goto post_clk_err;
|
||||
goto post_pm_err;
|
||||
}
|
||||
hw_rev_pidr = val;
|
||||
|
||||
@ -402,7 +399,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
|
||||
val, hw_rev->cidr_0123);
|
||||
rc = -EINVAL;
|
||||
goto post_clk_err;
|
||||
goto post_pm_err;
|
||||
}
|
||||
sig_cidr = val;
|
||||
|
||||
@ -421,7 +418,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
default:
|
||||
dev_err(dev, "Unsupported engines configuration.\n");
|
||||
rc = -EINVAL;
|
||||
goto post_clk_err;
|
||||
goto post_pm_err;
|
||||
}
|
||||
|
||||
/* Check security disable state */
|
||||
@ -447,14 +444,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
new_drvdata);
|
||||
if (rc) {
|
||||
dev_err(dev, "Could not register to interrupt %d\n", irq);
|
||||
goto post_clk_err;
|
||||
goto post_pm_err;
|
||||
}
|
||||
dev_dbg(dev, "Registered to IRQ: %d\n", irq);
|
||||
|
||||
rc = init_cc_regs(new_drvdata, true);
|
||||
if (rc) {
|
||||
dev_err(dev, "init_cc_regs failed\n");
|
||||
goto post_clk_err;
|
||||
goto post_pm_err;
|
||||
}
|
||||
|
||||
rc = cc_debugfs_init(new_drvdata);
|
||||
@ -477,15 +474,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
new_drvdata->mlli_sram_addr =
|
||||
cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
|
||||
if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
|
||||
dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
|
||||
rc = -ENOMEM;
|
||||
goto post_sram_mgr_err;
|
||||
goto post_fips_init_err;
|
||||
}
|
||||
|
||||
rc = cc_req_mgr_init(new_drvdata);
|
||||
if (rc) {
|
||||
dev_err(dev, "cc_req_mgr_init failed\n");
|
||||
goto post_sram_mgr_err;
|
||||
goto post_fips_init_err;
|
||||
}
|
||||
|
||||
rc = cc_buffer_mgr_init(new_drvdata);
|
||||
@ -494,12 +490,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
goto post_req_mgr_err;
|
||||
}
|
||||
|
||||
rc = cc_pm_init(new_drvdata);
|
||||
if (rc) {
|
||||
dev_err(dev, "cc_pm_init failed\n");
|
||||
goto post_buf_mgr_err;
|
||||
}
|
||||
|
||||
/* Allocate crypto algs */
|
||||
rc = cc_cipher_alloc(new_drvdata);
|
||||
if (rc) {
|
||||
@ -520,15 +510,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
goto post_hash_err;
|
||||
}
|
||||
|
||||
/* All set, we can allow autosuspend */
|
||||
cc_pm_go(new_drvdata);
|
||||
|
||||
/* If we got here and FIPS mode is enabled
|
||||
* it means all FIPS test passed, so let TEE
|
||||
* know we're good.
|
||||
*/
|
||||
cc_set_ree_fips_status(new_drvdata, true);
|
||||
|
||||
pm_runtime_put(dev);
|
||||
return 0;
|
||||
|
||||
post_hash_err:
|
||||
@ -539,16 +527,17 @@ post_buf_mgr_err:
|
||||
cc_buffer_mgr_fini(new_drvdata);
|
||||
post_req_mgr_err:
|
||||
cc_req_mgr_fini(new_drvdata);
|
||||
post_sram_mgr_err:
|
||||
cc_sram_mgr_fini(new_drvdata);
|
||||
post_fips_init_err:
|
||||
cc_fips_fini(new_drvdata);
|
||||
post_debugfs_err:
|
||||
cc_debugfs_fini(new_drvdata);
|
||||
post_regs_err:
|
||||
fini_cc_regs(new_drvdata);
|
||||
post_clk_err:
|
||||
cc_clk_off(new_drvdata);
|
||||
post_pm_err:
|
||||
pm_runtime_put_noidle(dev);
|
||||
pm_runtime_disable(dev);
|
||||
pm_runtime_set_suspended(dev);
|
||||
clk_disable_unprepare(new_drvdata->clk);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -560,36 +549,22 @@ void fini_cc_regs(struct cc_drvdata *drvdata)
|
||||
|
||||
static void cleanup_cc_resources(struct platform_device *plat_dev)
|
||||
{
|
||||
struct device *dev = &plat_dev->dev;
|
||||
struct cc_drvdata *drvdata =
|
||||
(struct cc_drvdata *)platform_get_drvdata(plat_dev);
|
||||
|
||||
cc_aead_free(drvdata);
|
||||
cc_hash_free(drvdata);
|
||||
cc_cipher_free(drvdata);
|
||||
cc_pm_fini(drvdata);
|
||||
cc_buffer_mgr_fini(drvdata);
|
||||
cc_req_mgr_fini(drvdata);
|
||||
cc_sram_mgr_fini(drvdata);
|
||||
cc_fips_fini(drvdata);
|
||||
cc_debugfs_fini(drvdata);
|
||||
fini_cc_regs(drvdata);
|
||||
cc_clk_off(drvdata);
|
||||
}
|
||||
|
||||
int cc_clk_on(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct clk *clk = drvdata->clk;
|
||||
int rc;
|
||||
|
||||
if (IS_ERR(clk))
|
||||
/* Not all devices have a clock associated with CCREE */
|
||||
return 0;
|
||||
|
||||
rc = clk_prepare_enable(clk);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return 0;
|
||||
pm_runtime_put_noidle(dev);
|
||||
pm_runtime_disable(dev);
|
||||
pm_runtime_set_suspended(dev);
|
||||
clk_disable_unprepare(drvdata->clk);
|
||||
}
|
||||
|
||||
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
|
||||
@ -600,17 +575,6 @@ unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
|
||||
return HASH_LEN_SIZE_630;
|
||||
}
|
||||
|
||||
void cc_clk_off(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct clk *clk = drvdata->clk;
|
||||
|
||||
if (IS_ERR(clk))
|
||||
/* Not all devices have a clock associated with CCREE */
|
||||
return;
|
||||
|
||||
clk_disable_unprepare(clk);
|
||||
}
|
||||
|
||||
static int ccree_probe(struct platform_device *plat_dev)
|
||||
{
|
||||
int rc;
|
||||
@ -653,7 +617,6 @@ static struct platform_driver ccree_driver = {
|
||||
|
||||
static int __init ccree_init(void)
|
||||
{
|
||||
cc_hash_global_init();
|
||||
cc_debugfs_global_init();
|
||||
|
||||
return platform_driver_register(&ccree_driver);
|
||||
|
@ -26,7 +26,6 @@
|
||||
#include <linux/clk.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
/* Registers definitions from shared/hw/ree_include */
|
||||
#include "cc_host_regs.h"
|
||||
#include "cc_crypto_ctx.h"
|
||||
#include "cc_hw_queue_defs.h"
|
||||
@ -71,9 +70,7 @@ enum cc_std_body {
|
||||
|
||||
#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
|
||||
|
||||
#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
|
||||
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
|
||||
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
|
||||
#define AXIM_MON_COMP_VALUE CC_GENMASK(CC_AXIM_MON_COMP_VALUE)
|
||||
|
||||
#define CC_CPP_AES_ABORT_MASK ( \
|
||||
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
|
||||
@@ -139,15 +136,15 @@ struct cc_drvdata {
int irq;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
cc_sram_addr_t mlli_sram_addr;
void *buff_mgr_handle;
void *cipher_handle;
u32 mlli_sram_addr;
struct dma_pool *mlli_buffs_pool;
struct list_head alg_list;
void *hash_handle;
void *aead_handle;
void *request_mgr_handle;
void *fips_handle;
void *sram_mgr_handle;
void *debugfs;
u32 sram_free_offset; /* offset to non-allocated area in SRAM */
struct dentry *dir; /* for debugfs */
struct clk *clk;
bool coherent;
char *hw_rev_name;
@ -158,7 +155,6 @@ struct cc_drvdata {
|
||||
int std_bodies;
|
||||
bool sec_disabled;
|
||||
u32 comp_mask;
|
||||
bool pm_on;
|
||||
};
|
||||
|
||||
struct cc_crypto_alg {
|
||||
@ -212,8 +208,6 @@ static inline void dump_byte_array(const char *name, const u8 *the_array,
|
||||
bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
|
||||
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
|
||||
void fini_cc_regs(struct cc_drvdata *drvdata);
|
||||
int cc_clk_on(struct cc_drvdata *drvdata);
|
||||
void cc_clk_off(struct cc_drvdata *drvdata);
|
||||
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);
|
||||
|
||||
static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
|
||||
|
@ -20,8 +20,8 @@
|
||||
#define CC_SM3_HASH_LEN_SIZE 8
|
||||
|
||||
struct cc_hash_handle {
|
||||
cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
|
||||
cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
|
||||
u32 digest_len_sram_addr; /* const value in SRAM*/
|
||||
u32 larval_digest_sram_addr; /* const value in SRAM */
|
||||
struct list_head hash_list;
|
||||
};
|
||||
|
||||
@@ -39,12 +39,19 @@ static const u32 cc_sha256_init[] = {
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static u64 cc_sha384_init[] = {
SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static u64 cc_sha512_init[] = {
SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };

/*
 * Due to the way the HW works, every double word in the SHA384 and SHA512
 * larval hashes must be stored in hi/lo order
 */
#define hilo(x) upper_32_bits(x), lower_32_bits(x)
static const u32 cc_sha384_init[] = {
hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
static const u32 cc_sha512_init[] = {
hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };

static const u32 cc_sm3_init[] = {
SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
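The hilo() macro added above lets the SHA-384/512 larval digests be laid out at build time in the high-word/low-word order the CryptoCell engine expects, instead of swapping the 32-bit halves at module init (the cc_swap_dwords() removal appears further down). A small, self-contained illustration of what one entry expands to; SHA512_H0 comes from <crypto/sha.h> and equals 0x6a09e667f3bcc908ULL:

#include <crypto/sha.h>
#include <linux/kernel.h>	/* upper_32_bits() / lower_32_bits() */

#define hilo(x) upper_32_bits(x), lower_32_bits(x)

/*
 * hilo(SHA512_H0) expands to the pair 0x6a09e667, 0xf3bcc908, i.e. the
 * high 32 bits first - the order the hardware wants when the larval
 * digest table is copied into SRAM.
 */
static const u32 example_sha512_h0[] = { hilo(SHA512_H0) };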
@ -342,7 +349,6 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
|
||||
/* Get final MAC result */
|
||||
hw_desc_init(&desc[idx]);
|
||||
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
|
||||
/* TODO */
|
||||
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
|
||||
NS_BIT, 1);
|
||||
set_queue_last_ind(ctx->drvdata, &desc[idx]);
|
||||
@ -422,8 +428,7 @@ static int cc_hash_digest(struct ahash_request *req)
|
||||
bool is_hmac = ctx->is_hmac;
|
||||
struct cc_crypto_req cc_req = {};
|
||||
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
|
||||
cc_sram_addr_t larval_digest_addr =
|
||||
cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
|
||||
u32 larval_digest_addr;
|
||||
int idx = 0;
|
||||
int rc = 0;
|
||||
gfp_t flags = cc_gfp_flags(&req->base);
|
||||
@ -465,6 +470,8 @@ static int cc_hash_digest(struct ahash_request *req)
|
||||
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
|
||||
ctx->inter_digestsize, NS_BIT);
|
||||
} else {
|
||||
larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
|
||||
ctx->hash_mode);
|
||||
set_din_sram(&desc[idx], larval_digest_addr,
|
||||
ctx->inter_digestsize);
|
||||
}
|
||||
@ -726,7 +733,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
|
||||
int digestsize = 0;
|
||||
int i, idx = 0, rc = 0;
|
||||
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
|
||||
cc_sram_addr_t larval_addr;
|
||||
u32 larval_addr;
|
||||
struct device *dev;
|
||||
|
||||
ctx = crypto_ahash_ctx(ahash);
|
||||
@ -752,7 +759,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
|
||||
return -ENOMEM;
|
||||
|
||||
ctx->key_params.key_dma_addr =
|
||||
dma_map_single(dev, (void *)ctx->key_params.key, keylen,
|
||||
dma_map_single(dev, ctx->key_params.key, keylen,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
|
||||
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
|
||||
@ -1067,8 +1074,8 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
|
||||
ctx->key_params.keylen = 0;
|
||||
|
||||
ctx->digest_buff_dma_addr =
|
||||
dma_map_single(dev, (void *)ctx->digest_buff,
|
||||
sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
|
||||
dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
|
||||
dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
|
||||
sizeof(ctx->digest_buff), ctx->digest_buff);
|
||||
@ -1079,7 +1086,7 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
|
||||
&ctx->digest_buff_dma_addr);
|
||||
|
||||
ctx->opad_tmp_keys_dma_addr =
|
||||
dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
|
||||
dma_map_single(dev, ctx->opad_tmp_keys_buff,
|
||||
sizeof(ctx->opad_tmp_keys_buff),
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
|
||||
@ -1196,8 +1203,8 @@ static int cc_mac_update(struct ahash_request *req)
|
||||
idx++;
|
||||
|
||||
/* Setup request structure */
|
||||
cc_req.user_cb = (void *)cc_update_complete;
|
||||
cc_req.user_arg = (void *)req;
|
||||
cc_req.user_cb = cc_update_complete;
|
||||
cc_req.user_arg = req;
|
||||
|
||||
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
|
||||
if (rc != -EINPROGRESS && rc != -EBUSY) {
|
||||
@ -1254,8 +1261,8 @@ static int cc_mac_final(struct ahash_request *req)
|
||||
}
|
||||
|
||||
/* Setup request structure */
|
||||
cc_req.user_cb = (void *)cc_hash_complete;
|
||||
cc_req.user_arg = (void *)req;
|
||||
cc_req.user_cb = cc_hash_complete;
|
||||
cc_req.user_arg = req;
|
||||
|
||||
if (state->xcbc_count && rem_cnt == 0) {
|
||||
/* Load key for ECB decryption */
|
||||
@ -1311,7 +1318,6 @@ static int cc_mac_final(struct ahash_request *req)
|
||||
|
||||
/* Get final MAC result */
|
||||
hw_desc_init(&desc[idx]);
|
||||
/* TODO */
|
||||
set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
|
||||
digestsize, NS_BIT, 1);
|
||||
set_queue_last_ind(ctx->drvdata, &desc[idx]);
|
||||
@ -1369,8 +1375,8 @@ static int cc_mac_finup(struct ahash_request *req)
|
||||
}
|
||||
|
||||
/* Setup request structure */
|
||||
cc_req.user_cb = (void *)cc_hash_complete;
|
||||
cc_req.user_arg = (void *)req;
|
||||
cc_req.user_cb = cc_hash_complete;
|
||||
cc_req.user_arg = req;
|
||||
|
||||
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
|
||||
key_len = CC_AES_128_BIT_KEY_SIZE;
|
||||
@ -1393,7 +1399,6 @@ static int cc_mac_finup(struct ahash_request *req)
|
||||
|
||||
/* Get final MAC result */
|
||||
hw_desc_init(&desc[idx]);
|
||||
/* TODO */
|
||||
set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
|
||||
digestsize, NS_BIT, 1);
|
||||
set_queue_last_ind(ctx->drvdata, &desc[idx]);
|
||||
@ -1448,8 +1453,8 @@ static int cc_mac_digest(struct ahash_request *req)
|
||||
}
|
||||
|
||||
/* Setup request structure */
|
||||
cc_req.user_cb = (void *)cc_digest_complete;
|
||||
cc_req.user_arg = (void *)req;
|
||||
cc_req.user_cb = cc_digest_complete;
|
||||
cc_req.user_arg = req;
|
||||
|
||||
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
|
||||
key_len = CC_AES_128_BIT_KEY_SIZE;
|
||||
@ -1820,7 +1825,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
|
||||
struct crypto_alg *alg;
|
||||
struct ahash_alg *halg;
|
||||
|
||||
t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
|
||||
t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
|
||||
if (!t_crypto_alg)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
@@ -1857,104 +1862,85 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
return t_crypto_alg;
}

static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
unsigned int size, u32 *sram_buff_ofs)
{
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
unsigned int larval_seq_len = 0;
int rc;

cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
return rc;

*sram_buff_ofs += size;
return 0;
}

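cc_init_copy_sram(), introduced above, wraps the "describe, send, advance the SRAM offset" sequence that cc_init_hash_sram() below previously open-coded for every constant table. A hedged fragment showing how one call site looks after the conversion; the table name is hypothetical, everything else is taken from the surrounding diff:

/* Inside cc_init_hash_sram(), after sram_buff_ofs has been set up: */
rc = cc_init_copy_sram(drvdata, example_table, sizeof(example_table),
		       &sram_buff_ofs);
if (rc)
	goto init_digest_const_err;
/* On success, sram_buff_ofs now points just past the copied table. */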
int cc_init_hash_sram(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
|
||||
cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
|
||||
unsigned int larval_seq_len = 0;
|
||||
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
|
||||
u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
|
||||
bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
|
||||
bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
|
||||
int rc = 0;
|
||||
|
||||
/* Copy-to-sram digest-len */
|
||||
cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
|
||||
ARRAY_SIZE(cc_digest_len_init), larval_seq,
|
||||
&larval_seq_len);
|
||||
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
|
||||
rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
|
||||
sizeof(cc_digest_len_init), &sram_buff_ofs);
|
||||
if (rc)
|
||||
goto init_digest_const_err;
|
||||
|
||||
sram_buff_ofs += sizeof(cc_digest_len_init);
|
||||
larval_seq_len = 0;
|
||||
|
||||
if (large_sha_supported) {
|
||||
/* Copy-to-sram digest-len for sha384/512 */
|
||||
cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
|
||||
ARRAY_SIZE(cc_digest_len_sha512_init),
|
||||
larval_seq, &larval_seq_len);
|
||||
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
|
||||
rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
|
||||
sizeof(cc_digest_len_sha512_init),
|
||||
&sram_buff_ofs);
|
||||
if (rc)
|
||||
goto init_digest_const_err;
|
||||
|
||||
sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
|
||||
larval_seq_len = 0;
|
||||
}
|
||||
|
||||
/* The initial digests offset */
|
||||
hash_handle->larval_digest_sram_addr = sram_buff_ofs;
|
||||
|
||||
/* Copy-to-sram initial SHA* digests */
|
||||
cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
|
||||
larval_seq, &larval_seq_len);
|
||||
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
|
||||
rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
|
||||
&sram_buff_ofs);
|
||||
if (rc)
|
||||
goto init_digest_const_err;
|
||||
sram_buff_ofs += sizeof(cc_md5_init);
|
||||
larval_seq_len = 0;
|
||||
|
||||
cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
|
||||
ARRAY_SIZE(cc_sha1_init), larval_seq,
|
||||
&larval_seq_len);
|
||||
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
|
||||
rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
|
||||
&sram_buff_ofs);
|
||||
if (rc)
|
||||
goto init_digest_const_err;
|
||||
sram_buff_ofs += sizeof(cc_sha1_init);
|
||||
larval_seq_len = 0;
|
||||
|
||||
cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
|
||||
ARRAY_SIZE(cc_sha224_init), larval_seq,
|
||||
&larval_seq_len);
|
||||
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
|
||||
rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
|
||||
&sram_buff_ofs);
|
||||
if (rc)
|
||||
goto init_digest_const_err;
|
||||
sram_buff_ofs += sizeof(cc_sha224_init);
|
||||
larval_seq_len = 0;
|
||||
|
||||
cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
|
||||
ARRAY_SIZE(cc_sha256_init), larval_seq,
|
||||
&larval_seq_len);
|
||||
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
|
||||
rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
|
||||
&sram_buff_ofs);
|
||||
if (rc)
|
||||
goto init_digest_const_err;
|
||||
sram_buff_ofs += sizeof(cc_sha256_init);
|
||||
larval_seq_len = 0;
|
||||
|
||||
if (sm3_supported) {
|
||||
cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
|
||||
ARRAY_SIZE(cc_sm3_init), larval_seq,
|
||||
&larval_seq_len);
|
||||
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
|
||||
rc = cc_init_copy_sram(drvdata, cc_sm3_init,
|
||||
sizeof(cc_sm3_init), &sram_buff_ofs);
|
||||
if (rc)
|
||||
goto init_digest_const_err;
|
||||
sram_buff_ofs += sizeof(cc_sm3_init);
|
||||
larval_seq_len = 0;
|
||||
}
|
||||
|
||||
if (large_sha_supported) {
|
||||
cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
|
||||
(ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
|
||||
&larval_seq_len);
|
||||
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
|
||||
rc = cc_init_copy_sram(drvdata, cc_sha384_init,
|
||||
sizeof(cc_sha384_init), &sram_buff_ofs);
|
||||
if (rc)
|
||||
goto init_digest_const_err;
|
||||
sram_buff_ofs += sizeof(cc_sha384_init);
|
||||
larval_seq_len = 0;
|
||||
|
||||
cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
|
||||
(ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
|
||||
&larval_seq_len);
|
||||
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
|
||||
rc = cc_init_copy_sram(drvdata, cc_sha512_init,
|
||||
sizeof(cc_sha512_init), &sram_buff_ofs);
|
||||
if (rc)
|
||||
goto init_digest_const_err;
|
||||
}
|
||||
@ -1963,38 +1949,16 @@ init_digest_const_err:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void __init cc_swap_dwords(u32 *buf, unsigned long size)
|
||||
{
|
||||
int i;
|
||||
u32 tmp;
|
||||
|
||||
for (i = 0; i < size; i += 2) {
|
||||
tmp = buf[i];
|
||||
buf[i] = buf[i + 1];
|
||||
buf[i + 1] = tmp;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Due to the way the HW works we need to swap every
|
||||
* double word in the SHA384 and SHA512 larval hashes
|
||||
*/
|
||||
void __init cc_hash_global_init(void)
|
||||
{
|
||||
cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
|
||||
cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
|
||||
}
|
||||
|
||||
int cc_hash_alloc(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct cc_hash_handle *hash_handle;
|
||||
cc_sram_addr_t sram_buff;
|
||||
u32 sram_buff;
|
||||
u32 sram_size_to_alloc;
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
int rc = 0;
|
||||
int alg;
|
||||
|
||||
hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
|
||||
hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
|
||||
if (!hash_handle)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -2016,7 +1980,6 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
|
||||
|
||||
sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
|
||||
if (sram_buff == NULL_SRAM_ADDR) {
|
||||
dev_err(dev, "SRAM pool exhausted\n");
|
||||
rc = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
@ -2056,12 +2019,10 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
|
||||
if (rc) {
|
||||
dev_err(dev, "%s alg registration failed\n",
|
||||
driver_hash[alg].driver_name);
|
||||
kfree(t_alg);
|
||||
goto fail;
|
||||
} else {
|
||||
list_add_tail(&t_alg->entry,
|
||||
&hash_handle->hash_list);
|
||||
}
|
||||
|
||||
list_add_tail(&t_alg->entry, &hash_handle->hash_list);
|
||||
}
|
||||
if (hw_mode == DRV_CIPHER_XCBC_MAC ||
|
||||
hw_mode == DRV_CIPHER_CMAC)
|
||||
@ -2081,18 +2042,16 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
|
||||
if (rc) {
|
||||
dev_err(dev, "%s alg registration failed\n",
|
||||
driver_hash[alg].driver_name);
|
||||
kfree(t_alg);
|
||||
goto fail;
|
||||
} else {
|
||||
list_add_tail(&t_alg->entry, &hash_handle->hash_list);
|
||||
}
|
||||
|
||||
list_add_tail(&t_alg->entry, &hash_handle->hash_list);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
kfree(drvdata->hash_handle);
|
||||
drvdata->hash_handle = NULL;
|
||||
cc_hash_free(drvdata);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -2101,17 +2060,12 @@ int cc_hash_free(struct cc_drvdata *drvdata)
|
||||
struct cc_hash_alg *t_hash_alg, *hash_n;
|
||||
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
|
||||
|
||||
if (hash_handle) {
|
||||
list_for_each_entry_safe(t_hash_alg, hash_n,
|
||||
&hash_handle->hash_list, entry) {
|
||||
crypto_unregister_ahash(&t_hash_alg->ahash_alg);
|
||||
list_del(&t_hash_alg->entry);
|
||||
kfree(t_hash_alg);
|
||||
}
|
||||
|
||||
kfree(hash_handle);
|
||||
drvdata->hash_handle = NULL;
|
||||
list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
|
||||
entry) {
|
||||
crypto_unregister_ahash(&t_hash_alg->ahash_alg);
|
||||
list_del(&t_hash_alg->entry);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2272,22 +2226,23 @@ static const void *cc_larval_digest(struct device *dev, u32 mode)
|
||||
}
|
||||
}
|
||||
|
||||
/*!
|
||||
* Gets the address of the initial digest in SRAM
|
||||
/**
|
||||
* cc_larval_digest_addr() - Get the address of the initial digest in SRAM
|
||||
* according to the given hash mode
|
||||
*
|
||||
* \param drvdata
|
||||
* \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
|
||||
* @drvdata: Associated device driver context
|
||||
* @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
|
||||
*
|
||||
* \return u32 The address of the initial digest in SRAM
|
||||
* Return:
|
||||
* The address of the initial digest in SRAM
|
||||
*/
|
||||
cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
|
||||
u32 cc_larval_digest_addr(void *drvdata, u32 mode)
|
||||
{
|
||||
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
|
||||
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
|
||||
struct device *dev = drvdata_to_dev(_drvdata);
|
||||
bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
|
||||
cc_sram_addr_t addr;
|
||||
u32 addr;
|
||||
|
||||
switch (mode) {
|
||||
case DRV_HASH_NULL:
|
||||
@ -2339,12 +2294,11 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
|
||||
return hash_handle->larval_digest_sram_addr;
|
||||
}
|
||||
|
||||
cc_sram_addr_t
|
||||
cc_digest_len_addr(void *drvdata, u32 mode)
|
||||
u32 cc_digest_len_addr(void *drvdata, u32 mode)
|
||||
{
|
||||
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
|
||||
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
|
||||
cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
|
||||
u32 digest_len_addr = hash_handle->digest_len_sram_addr;
|
||||
|
||||
switch (mode) {
|
||||
case DRV_HASH_SHA1:
|
||||
|
@ -80,30 +80,27 @@ int cc_hash_alloc(struct cc_drvdata *drvdata);
|
||||
int cc_init_hash_sram(struct cc_drvdata *drvdata);
|
||||
int cc_hash_free(struct cc_drvdata *drvdata);
|
||||
|
||||
/*!
|
||||
* Gets the initial digest length
|
||||
/**
|
||||
* cc_digest_len_addr() - Gets the initial digest length
|
||||
*
|
||||
* \param drvdata
|
||||
* \param mode The Hash mode. Supported modes:
|
||||
* MD5/SHA1/SHA224/SHA256/SHA384/SHA512
|
||||
* @drvdata: Associated device driver context
|
||||
* @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
|
||||
*
|
||||
* \return u32 returns the address of the initial digest length in SRAM
|
||||
* Return:
|
||||
* Returns the address of the initial digest length in SRAM
|
||||
*/
|
||||
cc_sram_addr_t
|
||||
cc_digest_len_addr(void *drvdata, u32 mode);
|
||||
u32 cc_digest_len_addr(void *drvdata, u32 mode);
|
||||
|
||||
/*!
|
||||
* Gets the address of the initial digest in SRAM
|
||||
/**
|
||||
* cc_larval_digest_addr() - Gets the address of the initial digest in SRAM
|
||||
* according to the given hash mode
|
||||
*
|
||||
* \param drvdata
|
||||
* \param mode The Hash mode. Supported modes:
|
||||
* MD5/SHA1/SHA224/SHA256/SHA384/SHA512
|
||||
* @drvdata: Associated device driver context
|
||||
* @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
|
||||
*
|
||||
* \return u32 The address of the initial digest in SRAM
|
||||
* Return:
|
||||
* The address of the initial digest in SRAM
|
||||
*/
|
||||
cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
|
||||
|
||||
void cc_hash_global_init(void);
|
||||
u32 cc_larval_digest_addr(void *drvdata, u32 mode);
|
||||
|
||||
#endif /*__CC_HASH_H__*/
|
||||
|
@@ -17,46 +17,43 @@
/* Define max. available slots in HW queue */
#define HW_QUEUE_SLOTS_MAX 15

#define CC_REG_LOW(word, name) \
(CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

#define CC_REG_HIGH(word, name) \
(CC_REG_LOW(word, name) + \
CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
#define CC_HWQ_GENMASK(word, field) \
CC_GENMASK(CC_DSCRPTR_QUEUE_WORD ## word ## _ ## field)

#define CC_GENMASK(word, name) \
GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))

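The reworked macros split the job in two: CC_GENMASK(name) now builds a mask from any FIELD_BIT_SHIFT/FIELD_BIT_SIZE pair, and CC_HWQ_GENMASK(word, field) pastes in the descriptor-word prefix before delegating to it, which is what lets cc_driver.h (earlier in this diff) shrink AXIM_MON_COMP_VALUE to a one-liner. A self-contained sketch with a made-up field, showing how the mask pairs with FIELD_PREP():

#include <linux/bitfield.h>	/* FIELD_PREP() */
#include <linux/bits.h>		/* GENMASK() */
#include <linux/types.h>

/* Hypothetical field described by the usual _BIT_SHIFT/_BIT_SIZE pair. */
#define EXAMPLE_FIELD_BIT_SHIFT	4
#define EXAMPLE_FIELD_BIT_SIZE	3

#define CC_REG_LOW(name)  (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name)  GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

#define EXAMPLE_MASK CC_GENMASK(EXAMPLE_FIELD)	/* GENMASK(6, 4) == 0x70 */

static inline u32 example_pack(u32 val)
{
	return FIELD_PREP(EXAMPLE_MASK, val);	/* val == 5 -> 0x50 */
}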
#define WORD0_VALUE CC_GENMASK(0, VALUE)
|
||||
#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
|
||||
#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
|
||||
#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
|
||||
#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
|
||||
#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
|
||||
#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
|
||||
#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
|
||||
#define WORD2_VALUE CC_GENMASK(2, VALUE)
|
||||
#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
|
||||
#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
|
||||
#define WORD3_DOUT_SIZE CC_GENMASK(3, DOUT_SIZE)
|
||||
#define WORD3_HASH_XOR_BIT CC_GENMASK(3, HASH_XOR_BIT)
|
||||
#define WORD3_NS_BIT CC_GENMASK(3, NS_BIT)
|
||||
#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
|
||||
#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
|
||||
#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
|
||||
#define WORD4_AES_XOR_CRYPTO_KEY CC_GENMASK(4, AES_XOR_CRYPTO_KEY)
|
||||
#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
|
||||
#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
|
||||
#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
|
||||
#define WORD4_CIPHER_CONF2 CC_GENMASK(4, CIPHER_CONF2)
|
||||
#define WORD4_CIPHER_DO CC_GENMASK(4, CIPHER_DO)
|
||||
#define WORD4_CIPHER_MODE CC_GENMASK(4, CIPHER_MODE)
|
||||
#define WORD4_CMAC_SIZE0 CC_GENMASK(4, CMAC_SIZE0)
|
||||
#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE)
|
||||
#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE)
|
||||
#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION)
|
||||
#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH)
|
||||
#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH)
|
||||
#define WORD0_VALUE CC_HWQ_GENMASK(0, VALUE)
|
||||
#define WORD0_CPP_CIPHER_MODE CC_HWQ_GENMASK(0, CPP_CIPHER_MODE)
|
||||
#define WORD1_DIN_CONST_VALUE CC_HWQ_GENMASK(1, DIN_CONST_VALUE)
|
||||
#define WORD1_DIN_DMA_MODE CC_HWQ_GENMASK(1, DIN_DMA_MODE)
|
||||
#define WORD1_DIN_SIZE CC_HWQ_GENMASK(1, DIN_SIZE)
|
||||
#define WORD1_NOT_LAST CC_HWQ_GENMASK(1, NOT_LAST)
|
||||
#define WORD1_NS_BIT CC_HWQ_GENMASK(1, NS_BIT)
|
||||
#define WORD1_LOCK_QUEUE CC_HWQ_GENMASK(1, LOCK_QUEUE)
|
||||
#define WORD2_VALUE CC_HWQ_GENMASK(2, VALUE)
|
||||
#define WORD3_DOUT_DMA_MODE CC_HWQ_GENMASK(3, DOUT_DMA_MODE)
|
||||
#define WORD3_DOUT_LAST_IND CC_HWQ_GENMASK(3, DOUT_LAST_IND)
|
||||
#define WORD3_DOUT_SIZE CC_HWQ_GENMASK(3, DOUT_SIZE)
|
||||
#define WORD3_HASH_XOR_BIT CC_HWQ_GENMASK(3, HASH_XOR_BIT)
|
||||
#define WORD3_NS_BIT CC_HWQ_GENMASK(3, NS_BIT)
|
||||
#define WORD3_QUEUE_LAST_IND CC_HWQ_GENMASK(3, QUEUE_LAST_IND)
|
||||
#define WORD4_ACK_NEEDED CC_HWQ_GENMASK(4, ACK_NEEDED)
|
||||
#define WORD4_AES_SEL_N_HASH CC_HWQ_GENMASK(4, AES_SEL_N_HASH)
|
||||
#define WORD4_AES_XOR_CRYPTO_KEY CC_HWQ_GENMASK(4, AES_XOR_CRYPTO_KEY)
|
||||
#define WORD4_BYTES_SWAP CC_HWQ_GENMASK(4, BYTES_SWAP)
|
||||
#define WORD4_CIPHER_CONF0 CC_HWQ_GENMASK(4, CIPHER_CONF0)
|
||||
#define WORD4_CIPHER_CONF1 CC_HWQ_GENMASK(4, CIPHER_CONF1)
|
||||
#define WORD4_CIPHER_CONF2 CC_HWQ_GENMASK(4, CIPHER_CONF2)
|
||||
#define WORD4_CIPHER_DO CC_HWQ_GENMASK(4, CIPHER_DO)
|
||||
#define WORD4_CIPHER_MODE CC_HWQ_GENMASK(4, CIPHER_MODE)
|
||||
#define WORD4_CMAC_SIZE0 CC_HWQ_GENMASK(4, CMAC_SIZE0)
|
||||
#define WORD4_DATA_FLOW_MODE CC_HWQ_GENMASK(4, DATA_FLOW_MODE)
|
||||
#define WORD4_KEY_SIZE CC_HWQ_GENMASK(4, KEY_SIZE)
|
||||
#define WORD4_SETUP_OPERATION CC_HWQ_GENMASK(4, SETUP_OPERATION)
|
||||
#define WORD5_DIN_ADDR_HIGH CC_HWQ_GENMASK(5, DIN_ADDR_HIGH)
|
||||
#define WORD5_DOUT_ADDR_HIGH CC_HWQ_GENMASK(5, DOUT_ADDR_HIGH)
|
||||
|
||||
/******************************************************************************
|
||||
* TYPE DEFINITIONS
|
||||
@ -207,31 +204,32 @@ enum cc_hash_cipher_pad {
|
||||
/* Descriptor packing macros */
|
||||
/*****************************/
|
||||
|
||||
/*
|
||||
* Init a HW descriptor struct
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
/**
|
||||
* hw_desc_init() - Init a HW descriptor struct
|
||||
* @pdesc: pointer to HW descriptor struct
|
||||
*/
|
||||
static inline void hw_desc_init(struct cc_hw_desc *pdesc)
|
||||
{
|
||||
memset(pdesc, 0, sizeof(struct cc_hw_desc));
|
||||
}
|
||||
|
||||
/*
|
||||
* Indicates the end of current HW descriptors flow and release the HW engines.
|
||||
/**
|
||||
* set_queue_last_ind_bit() - Indicate the end of current HW descriptors flow
|
||||
* and release the HW engines.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
*/
|
||||
static inline void set_queue_last_ind_bit(struct cc_hw_desc *pdesc)
|
||||
{
|
||||
pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DIN field of a HW descriptors
|
||||
/**
|
||||
* set_din_type() - Set the DIN field of a HW descriptor
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @dma_mode: dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
|
||||
* @addr: dinAdr DIN address
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
|
||||
* @addr: DIN address
|
||||
* @size: Data size in bytes
|
||||
* @axi_sec: AXI secure bit
|
||||
*/
|
||||
@@ -239,20 +237,20 @@ static inline void set_din_type(struct cc_hw_desc *pdesc,
enum cc_dma_mode dma_mode, dma_addr_t addr,
u32 size, enum cc_axi_sec axi_sec)
{
pdesc->word[0] = (u32)addr;
pdesc->word[0] = lower_32_bits(addr);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32)));
pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, upper_32_bits(addr));
#endif
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
FIELD_PREP(WORD1_DIN_SIZE, size) |
FIELD_PREP(WORD1_NS_BIT, axi_sec);
}

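In the set_din_type() hunk above (and its set_dout_type() twin further down), the open-coded (u32)/(u16) casts become lower_32_bits()/upper_32_bits(), a readability change: the low half of the dma_addr_t still lands in word[0] and the high bits still go into the WORD5 field. A tiny self-contained illustration, assuming an example 64-bit address:

#include <linux/kernel.h>	/* lower_32_bits() / upper_32_bits() */
#include <linux/types.h>

/* Illustrative only: how set_din_type() splits a 64-bit DMA address. */
static inline void example_split(u64 addr, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(addr);	/* 0x0000000112345678 -> 0x12345678 */
	*hi = upper_32_bits(addr);	/* 0x0000000112345678 -> 0x00000001 */
}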
/*
|
||||
* Set the DIN field of a HW descriptors to NO DMA mode.
|
||||
/**
|
||||
* set_din_no_dma() - Set the DIN field of a HW descriptor to NO DMA mode.
|
||||
* Used for NOP descriptor, register patches and other special modes.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @addr: DIN address
|
||||
* @size: Data size in bytes
|
||||
*/
|
||||
@ -262,14 +260,11 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
|
||||
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup the special CPP descriptor
|
||||
/**
|
||||
* set_cpp_crypto_key() - Setup the special CPP descriptor
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @alg: cipher used (AES / SM4)
|
||||
* @mode: mode used (CTR or CBC)
|
||||
* @slot: slot number
|
||||
* @ksize: key size
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @slot: Slot number
|
||||
*/
|
||||
static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
|
||||
{
|
||||
@ -281,27 +276,26 @@ static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DIN field of a HW descriptors to SRAM mode.
|
||||
/**
|
||||
* set_din_sram() - Set the DIN field of a HW descriptor to SRAM mode.
|
||||
* Note: No need to check SRAM alignment since host requests do not use SRAM and
|
||||
* adaptor will enforce alignment check.
|
||||
* the adaptor will enforce alignment checks.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @addr: DIN address
|
||||
* @size Data size in bytes
|
||||
* @size: Data size in bytes
|
||||
*/
|
||||
static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr,
|
||||
u32 size)
|
||||
static inline void set_din_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
|
||||
{
|
||||
pdesc->word[0] = (u32)addr;
|
||||
pdesc->word[0] = addr;
|
||||
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
|
||||
FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DIN field of a HW descriptors to CONST mode
|
||||
/**
|
||||
* set_din_const() - Set the DIN field of a HW descriptor to CONST mode
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @val: DIN const value
|
||||
* @size: Data size in bytes
|
||||
*/
|
||||
@ -313,20 +307,20 @@ static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
|
||||
FIELD_PREP(WORD1_DIN_SIZE, size);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DIN not last input data indicator
|
||||
/**
|
||||
* set_din_not_last_indication() - Set the DIN not last input data indicator
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
*/
|
||||
static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
|
||||
{
|
||||
pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DOUT field of a HW descriptors
|
||||
/**
|
||||
* set_dout_type() - Set the DOUT field of a HW descriptor
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
|
||||
* @addr: DOUT address
|
||||
* @size: Data size in bytes
|
||||
@ -336,24 +330,24 @@ static inline void set_dout_type(struct cc_hw_desc *pdesc,
|
||||
enum cc_dma_mode dma_mode, dma_addr_t addr,
|
||||
u32 size, enum cc_axi_sec axi_sec)
|
||||
{
|
||||
pdesc->word[2] = (u32)addr;
|
||||
pdesc->word[2] = lower_32_bits(addr);
|
||||
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
||||
pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32)));
|
||||
pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, upper_32_bits(addr));
|
||||
#endif
|
||||
pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
|
||||
FIELD_PREP(WORD3_DOUT_SIZE, size) |
|
||||
FIELD_PREP(WORD3_NS_BIT, axi_sec);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DOUT field of a HW descriptors to DLLI type
|
||||
/**
|
||||
* set_dout_dlli() - Set the DOUT field of a HW descriptor to DLLI type
|
||||
* The LAST INDICATION is provided by the user
|
||||
*
|
||||
* @pdesc pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @addr: DOUT address
|
||||
* @size: Data size in bytes
|
||||
* @last_ind: The last indication bit
|
||||
* @axi_sec: AXI secure bit
|
||||
* @last_ind: The last indication bit
|
||||
*/
|
||||
static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
|
||||
u32 size, enum cc_axi_sec axi_sec,
|
||||
@ -363,29 +357,28 @@ static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
|
||||
pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DOUT field of a HW descriptors to DLLI type
|
||||
/**
|
||||
* set_dout_mlli() - Set the DOUT field of a HW descriptor to MLLI type
|
||||
* The LAST INDICATION is provided by the user
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @addr: DOUT address
|
||||
* @size: Data size in bytes
|
||||
* @last_ind: The last indication bit
|
||||
* @axi_sec: AXI secure bit
|
||||
* @last_ind: The last indication bit
|
||||
*/
|
||||
static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
|
||||
u32 size, enum cc_axi_sec axi_sec,
|
||||
bool last_ind)
|
||||
static inline void set_dout_mlli(struct cc_hw_desc *pdesc, u32 addr, u32 size,
|
||||
enum cc_axi_sec axi_sec, bool last_ind)
|
||||
{
|
||||
set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
|
||||
pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DOUT field of a HW descriptors to NO DMA mode.
|
||||
/**
|
||||
* set_dout_no_dma() - Set the DOUT field of a HW descriptor to NO DMA mode.
|
||||
* Used for NOP descriptor, register patches and other special modes.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: pointer to HW descriptor struct
|
||||
* @addr: DOUT address
|
||||
* @size: Data size in bytes
|
||||
* @write_enable: Enables a write operation to a register
|
||||
@ -398,54 +391,55 @@ static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
|
||||
FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the word for the XOR operation.
|
||||
/**
|
||||
* set_xor_val() - Set the word for the XOR operation.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @val: xor data value
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @val: XOR data value
|
||||
*/
|
||||
static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
|
||||
{
|
||||
pdesc->word[2] = val;
|
||||
}
|
||||
|
||||
/*
|
||||
* Sets the XOR indicator bit in the descriptor
|
||||
/**
|
||||
* set_xor_active() - Set the XOR indicator bit in the descriptor
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
*/
|
||||
static inline void set_xor_active(struct cc_hw_desc *pdesc)
|
||||
{
|
||||
pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Select the AES engine instead of HASH engine when setting up combined mode
|
||||
* with AES XCBC MAC
|
||||
/**
|
||||
* set_aes_not_hash_mode() - Select the AES engine instead of HASH engine when
|
||||
* setting up combined mode with AES XCBC MAC
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
*/
|
||||
static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
|
||||
{
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set aes xor crypto key, this in some secenrios select SM3 engine
|
||||
/**
|
||||
* set_aes_xor_crypto_key() - Set aes xor crypto key, which in some scenarios
|
||||
* selects the SM3 engine
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
*/
|
||||
static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc)
|
||||
{
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the DOUT field of a HW descriptors to SRAM mode
|
||||
/**
|
||||
* set_dout_sram() - Set the DOUT field of a HW descriptor to SRAM mode
|
||||
* Note: No need to check SRAM alignment since host requests do not use SRAM and
|
||||
* adaptor will enforce alignment check.
|
||||
* the adaptor will enforce alignment checks.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @addr: DOUT address
|
||||
* @size: Data size in bytes
|
||||
*/
|
||||
@ -456,32 +450,34 @@ static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
|
||||
FIELD_PREP(WORD3_DOUT_SIZE, size);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sets the data unit size for XEX mode in data_out_addr[15:0]
|
||||
/**
|
||||
* set_xex_data_unit_size() - Set the data unit size for XEX mode in
|
||||
* data_out_addr[15:0]
|
||||
*
|
||||
* @pdesc: pDesc pointer HW descriptor struct
|
||||
* @size: data unit size for XEX mode
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @size: Data unit size for XEX mode
|
||||
*/
|
||||
static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
|
||||
{
|
||||
pdesc->word[2] = size;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the number of rounds for Multi2 in data_out_addr[15:0]
|
||||
/**
|
||||
* set_multi2_num_rounds() - Set the number of rounds for Multi2 in
|
||||
* data_out_addr[15:0]
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @num: number of rounds for Multi2
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @num: Number of rounds for Multi2
|
||||
*/
|
||||
static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
|
||||
{
|
||||
pdesc->word[2] = num;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the flow mode.
|
||||
/**
|
||||
* set_flow_mode() - Set the flow mode.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @mode: Any one of the modes defined in [CC7x-DESC]
|
||||
*/
|
||||
static inline void set_flow_mode(struct cc_hw_desc *pdesc,
|
||||
@ -490,22 +486,22 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the cipher mode.
|
||||
/**
|
||||
* set_cipher_mode() - Set the cipher mode.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @mode: Any one of the modes defined in [CC7x-DESC]
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @mode: Any one of the modes defined in [CC7x-DESC]
|
||||
*/
|
||||
static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
|
||||
{
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the cipher mode for hash algorithms.
|
||||
/**
|
||||
* set_hash_cipher_mode() - Set the cipher mode for hash algorithms.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @cipher_mode: Any one of the modes defined in [CC7x-DESC]
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @cipher_mode: Any one of the modes defined in [CC7x-DESC]
|
||||
* @hash_mode: specifies which hash is being handled
|
||||
*/
|
||||
static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
|
||||
@ -517,10 +513,10 @@ static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
|
||||
set_aes_xor_crypto_key(pdesc);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the cipher configuration fields.
|
||||
/**
|
||||
* set_cipher_config0() - Set the cipher configuration fields.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @mode: Any one of the modes defined in [CC7x-DESC]
|
||||
*/
|
||||
static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
|
||||
@ -528,11 +524,11 @@ static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the cipher configuration fields.
|
||||
/**
|
||||
* set_cipher_config1() - Set the cipher configuration fields.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @config: Any one of the modes defined in [CC7x-DESC]
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @config: Padding mode
|
||||
*/
|
||||
static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
|
||||
enum cc_hash_conf_pad config)
|
||||
@ -540,10 +536,10 @@ static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set HW key configuration fields.
|
||||
/**
|
||||
* set_hw_crypto_key() - Set HW key configuration fields.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @hw_key: The HW key slot asdefined in enum cc_hw_crypto_key
|
||||
*/
|
||||
static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
|
||||
@ -555,64 +551,64 @@ static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
|
||||
(hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
|
||||
}
|
||||
|
||||
/*
|
||||
* Set byte order of all setup-finalize descriptors.
|
||||
/**
|
||||
* set_bytes_swap() - Set byte order of all setup-finalize descriptors.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @config: Any one of the modes defined in [CC7x-DESC]
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @config: True to enable byte swapping
|
||||
*/
|
||||
static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
|
||||
{
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set CMAC_SIZE0 mode.
|
||||
/**
|
||||
* set_cmac_size0_mode() - Set CMAC_SIZE0 mode.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
*/
|
||||
static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
|
||||
{
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set key size descriptor field.
|
||||
/**
|
||||
* set_key_size() - Set key size descriptor field.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @size: key size in bytes (NOT size code)
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @size: Key size in bytes (NOT size code)
|
||||
*/
|
||||
static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
|
||||
{
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set AES key size.
|
||||
/**
|
||||
* set_key_size_aes() - Set AES key size.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @size: key size in bytes (NOT size code)
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @size: Key size in bytes (NOT size code)
|
||||
*/
|
||||
static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
|
||||
{
|
||||
set_key_size(pdesc, ((size >> 3) - 2));
|
||||
}
|
||||
|
||||
/*
|
||||
* Set DES key size.
|
||||
/**
|
||||
* set_key_size_des() - Set DES key size.
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @size: key size in bytes (NOT size code)
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @size: Key size in bytes (NOT size code)
|
||||
*/
|
||||
static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
|
||||
{
|
||||
set_key_size(pdesc, ((size >> 3) - 1));
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the descriptor setup mode
|
||||
/**
|
||||
* set_setup_mode() - Set the descriptor setup mode
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @mode: Any one of the setup modes defined in [CC7x-DESC]
|
||||
*/
|
||||
static inline void set_setup_mode(struct cc_hw_desc *pdesc,
|
||||
@ -621,10 +617,10 @@ static inline void set_setup_mode(struct cc_hw_desc *pdesc,
|
||||
pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the descriptor cipher DO
|
||||
/**
|
||||
* set_cipher_do() - Set the descriptor cipher DO
|
||||
*
|
||||
* @pdesc: pointer HW descriptor struct
|
||||
* @pdesc: Pointer to HW descriptor struct
|
||||
* @config: Any one of the cipher do defined in [CC7x-DESC]
|
||||
*/
|
||||
static inline void set_cipher_do(struct cc_hw_desc *pdesc,
|
||||
|
@@ -15,29 +15,25 @@
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00

const struct dev_pm_ops ccree_pm = {
	SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
};

int cc_pm_suspend(struct device *dev)
static int cc_pm_suspend(struct device *dev)
{
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
	fini_cc_regs(drvdata);
	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
	cc_clk_off(drvdata);
	clk_disable_unprepare(drvdata->clk);
	return 0;
}

int cc_pm_resume(struct device *dev)
static int cc_pm_resume(struct device *dev)
{
	int rc;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
	/* Enables the device source clk */
	rc = cc_clk_on(drvdata);
	rc = clk_prepare_enable(drvdata->clk);
	if (rc) {
		dev_err(dev, "failed getting clock back on. We're toast.\n");
		return rc;
@@ -62,53 +58,19 @@ int cc_pm_resume(struct device *dev)
	return 0;
}

const struct dev_pm_ops ccree_pm = {
	SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
};

int cc_pm_get(struct device *dev)
{
	int rc = 0;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata->pm_on)
		rc = pm_runtime_get_sync(dev);
	int rc = pm_runtime_get_sync(dev);

	return (rc == 1 ? 0 : rc);
}

void cc_pm_put_suspend(struct device *dev)
{
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata->pm_on) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}
}

bool cc_pm_is_dev_suspended(struct device *dev)
{
	/* check device state using runtime api */
	return pm_runtime_suspended(dev);
}

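For context, a hedged sketch of how a request path brackets hardware work with runtime PM once the runtime-PM core is the only gate, mirroring what cc_pm_get()/cc_pm_put_suspend() do for the driver (the function name and the work placeholder are illustrative, not ccree code):

/* Hedged sketch, not ccree code: bracketing one request with runtime PM. */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int ex_send_one_request(struct device *dev)
{
	int rc;

	rc = pm_runtime_get_sync(dev);	/* resumes the device if suspended */
	if (rc < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return rc;
	}

	/* ... enqueue the descriptor sequence to the hardware here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference; suspend later */
	return 0;
}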
int cc_pm_init(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
|
||||
/* must be before the enabling to avoid redundant suspending */
|
||||
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
|
||||
pm_runtime_use_autosuspend(dev);
|
||||
/* set us as active - note we won't do PM ops until cc_pm_go()! */
|
||||
return pm_runtime_set_active(dev);
|
||||
}
|
||||
|
||||
/* enable the PM module*/
|
||||
void cc_pm_go(struct cc_drvdata *drvdata)
|
||||
{
|
||||
pm_runtime_enable(drvdata_to_dev(drvdata));
|
||||
drvdata->pm_on = true;
|
||||
}
|
||||
|
||||
void cc_pm_fini(struct cc_drvdata *drvdata)
|
||||
{
|
||||
pm_runtime_disable(drvdata_to_dev(drvdata));
|
||||
drvdata->pm_on = false;
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_put_autosuspend(dev);
|
||||
}
|
||||
|
@ -15,26 +15,11 @@
|
||||
|
||||
extern const struct dev_pm_ops ccree_pm;
|
||||
|
||||
int cc_pm_init(struct cc_drvdata *drvdata);
|
||||
void cc_pm_go(struct cc_drvdata *drvdata);
|
||||
void cc_pm_fini(struct cc_drvdata *drvdata);
|
||||
int cc_pm_suspend(struct device *dev);
|
||||
int cc_pm_resume(struct device *dev);
|
||||
int cc_pm_get(struct device *dev);
|
||||
void cc_pm_put_suspend(struct device *dev);
|
||||
bool cc_pm_is_dev_suspended(struct device *dev);
|
||||
|
||||
#else
|
||||
|
||||
static inline int cc_pm_init(struct cc_drvdata *drvdata)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
|
||||
|
||||
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
|
||||
|
||||
static inline int cc_pm_get(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
@ -42,12 +27,6 @@ static inline int cc_pm_get(struct device *dev)
|
||||
|
||||
static inline void cc_pm_put_suspend(struct device *dev) {}
|
||||
|
||||
static inline bool cc_pm_is_dev_suspended(struct device *dev)
|
||||
{
|
||||
/* if PM not supported device is never suspend */
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /*__POWER_MGR_H__*/
|
||||
|
@ -206,12 +206,13 @@ static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
|
||||
}
|
||||
}
|
||||
|
||||
/*!
|
||||
* Completion will take place if and only if user requested completion
|
||||
* by cc_send_sync_request().
|
||||
/**
|
||||
* request_mgr_complete() - Completion will take place if and only if user
|
||||
* requested completion by cc_send_sync_request().
|
||||
*
|
||||
* \param dev
|
||||
* \param dx_compl_h The completion event to signal
|
||||
* @dev: Device pointer
|
||||
* @dx_compl_h: The completion event to signal
|
||||
* @dummy: unused error code
|
||||
*/
|
||||
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
|
||||
int dummy)
|
||||
@ -264,15 +265,15 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
/*!
|
||||
* Enqueue caller request to crypto hardware.
|
||||
/**
|
||||
* cc_do_send_request() - Enqueue caller request to crypto hardware.
|
||||
* Need to be called with HW lock held and PM running
|
||||
*
|
||||
* \param drvdata
|
||||
* \param cc_req The request to enqueue
|
||||
* \param desc The crypto sequence
|
||||
* \param len The crypto sequence length
|
||||
* \param add_comp If "true": add an artificial dout DMA to mark completion
|
||||
* @drvdata: Associated device driver context
|
||||
* @cc_req: The request to enqueue
|
||||
* @desc: The crypto sequence
|
||||
* @len: The crypto sequence length
|
||||
* @add_comp: If "true": add an artificial dout DMA to mark completion
|
||||
*
|
||||
*/
|
||||
static void cc_do_send_request(struct cc_drvdata *drvdata,
|
||||
@ -295,7 +296,6 @@ static void cc_do_send_request(struct cc_drvdata *drvdata,
|
||||
req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
|
||||
req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
|
||||
(MAX_REQUEST_QUEUE_SIZE - 1);
|
||||
/* TODO: Use circ_buf.h ? */
|
||||
|
||||
dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
|
||||
|
||||
@ -377,7 +377,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
|
||||
rc = cc_queues_status(drvdata, mgr, bli->len);
|
||||
if (rc) {
|
||||
/*
|
||||
* There is still not room in the FIFO for
|
||||
* There is still no room in the FIFO for
|
||||
* this request. Bail out. We'll return here
|
||||
* on the next completion irq.
|
||||
*/
|
||||
@ -476,10 +476,6 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
|
||||
break;
|
||||
|
||||
spin_unlock_bh(&mgr->hw_lock);
|
||||
if (rc != -EAGAIN) {
|
||||
cc_pm_put_suspend(dev);
|
||||
return rc;
|
||||
}
|
||||
wait_for_completion_interruptible(&drvdata->hw_queue_avail);
|
||||
reinit_completion(&drvdata->hw_queue_avail);
|
||||
}
|
||||
@ -490,16 +486,18 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*!
|
||||
* Enqueue caller request to crypto hardware during init process.
|
||||
* assume this function is not called in middle of a flow,
|
||||
/**
|
||||
* send_request_init() - Enqueue caller request to crypto hardware during init
|
||||
* process.
|
||||
* Assume this function is not called in the middle of a flow,
|
||||
* since we set QUEUE_LAST_IND flag in the last descriptor.
|
||||
*
|
||||
* \param drvdata
|
||||
* \param desc The crypto sequence
|
||||
* \param len The crypto sequence length
|
||||
* @drvdata: Associated device driver context
|
||||
* @desc: The crypto sequence
|
||||
* @len: The crypto sequence length
|
||||
*
|
||||
* \return int Returns "0" upon success
|
||||
* Return:
|
||||
* Returns "0" upon success
|
||||
*/
|
||||
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
|
||||
unsigned int len)
|
||||
|
@ -12,18 +12,17 @@
|
||||
|
||||
int cc_req_mgr_init(struct cc_drvdata *drvdata);
|
||||
|
||||
/*!
|
||||
* Enqueue caller request to crypto hardware.
|
||||
/**
|
||||
* cc_send_request() - Enqueue caller request to crypto hardware.
|
||||
*
|
||||
* \param drvdata
|
||||
* \param cc_req The request to enqueue
|
||||
* \param desc The crypto sequence
|
||||
* \param len The crypto sequence length
|
||||
* \param is_dout If "true": completion is handled by the caller
|
||||
* If "false": this function adds a dummy descriptor completion
|
||||
* and waits upon completion signal.
|
||||
* @drvdata: Associated device driver context
|
||||
* @cc_req: The request to enqueue
|
||||
* @desc: The crypto sequence
|
||||
* @len: The crypto sequence length
|
||||
* @req: Asynchronous crypto request
|
||||
*
|
||||
* \return int Returns -EINPROGRESS or error
|
||||
* Return:
|
||||
* Returns -EINPROGRESS or error
|
||||
*/
|
||||
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
|
||||
struct cc_hw_desc *desc, unsigned int len,
|
||||
|
@ -4,89 +4,62 @@
|
||||
#include "cc_driver.h"
|
||||
#include "cc_sram_mgr.h"
|
||||
|
||||
/**
|
||||
* struct cc_sram_ctx -Internal RAM context manager
|
||||
* @sram_free_offset: the offset to the non-allocated area
|
||||
*/
|
||||
struct cc_sram_ctx {
|
||||
cc_sram_addr_t sram_free_offset;
|
||||
};
|
||||
|
||||
/**
|
||||
* cc_sram_mgr_fini() - Cleanup SRAM pool.
|
||||
*
|
||||
* @drvdata: Associated device driver context
|
||||
*/
|
||||
void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
|
||||
{
|
||||
/* Nothing needed */
|
||||
}
|
||||
|
||||
/**
|
||||
* cc_sram_mgr_init() - Initializes SRAM pool.
|
||||
* The pool starts right at the beginning of SRAM.
|
||||
* Returns zero for success, negative value otherwise.
|
||||
*
|
||||
* @drvdata: Associated device driver context
|
||||
*
|
||||
* Return:
|
||||
* 0 for success, negative error code for failure.
|
||||
*/
|
||||
int cc_sram_mgr_init(struct cc_drvdata *drvdata)
|
||||
{
|
||||
struct cc_sram_ctx *ctx;
|
||||
dma_addr_t start = 0;
|
||||
u32 start = 0;
|
||||
struct device *dev = drvdata_to_dev(drvdata);
|
||||
|
||||
if (drvdata->hw_rev < CC_HW_REV_712) {
|
||||
/* Pool starts after ROM bytes */
|
||||
start = (dma_addr_t)cc_ioread(drvdata,
|
||||
CC_REG(HOST_SEP_SRAM_THRESHOLD));
|
||||
|
||||
start = cc_ioread(drvdata, CC_REG(HOST_SEP_SRAM_THRESHOLD));
|
||||
if ((start & 0x3) != 0) {
|
||||
dev_err(dev, "Invalid SRAM offset %pad\n", &start);
|
||||
dev_err(dev, "Invalid SRAM offset 0x%x\n", start);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Allocate "this" context */
|
||||
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
|
||||
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
ctx->sram_free_offset = start;
|
||||
drvdata->sram_mgr_handle = ctx;
|
||||
|
||||
drvdata->sram_free_offset = start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*!
 * Allocated buffer from SRAM pool.
 * Note: Caller is responsible to free the LAST allocated buffer.
 * This function does not taking care of any fragmentation may occur
 * by the order of calls to alloc/free.
/**
 * cc_sram_alloc() - Allocate buffer from SRAM pool.
 *
 * \param drvdata
 * \param size The requested bytes to allocate
 * @drvdata: Associated device driver context
 * @size: The requested numer of bytes to allocate
 *
 * Return:
 * Address offset in SRAM or NULL_SRAM_ADDR for failure.
 */
cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
{
	struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	cc_sram_addr_t p;
	u32 p;

	if ((size & 0x3)) {
		dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
			size);
		return NULL_SRAM_ADDR;
	}
	if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
		dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
			size, smgr_ctx->sram_free_offset);
	if (size > (CC_CC_SRAM_SIZE - drvdata->sram_free_offset)) {
		dev_err(dev, "Not enough space to allocate %u B (at offset %u)\n",
			size, drvdata->sram_free_offset);
		return NULL_SRAM_ADDR;
	}

	p = smgr_ctx->sram_free_offset;
	smgr_ctx->sram_free_offset += size;
	dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
	p = drvdata->sram_free_offset;
	drvdata->sram_free_offset += size;
	dev_dbg(dev, "Allocated %u B @ %u\n", size, p);
	return p;
}

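cc_sram_alloc() is a simple bump allocator: one free-offset cursor, 4-byte-aligned requests, and no per-buffer free. A self-contained sketch of the same scheme, assuming illustrative names and pool size (not the ccree values):

/* Minimal bump-allocator sketch of the scheme cc_sram_alloc() uses. */
#include <linux/types.h>

#define EX_SRAM_SIZE	4096
#define EX_NULL_ADDR	((u32)-1)

struct ex_sram_pool {
	u32 free_offset;	/* offset of the first unallocated byte */
};

static u32 ex_sram_alloc(struct ex_sram_pool *pool, u32 size)
{
	u32 p;

	if (size & 0x3)				/* must be a multiple of 4 */
		return EX_NULL_ADDR;
	if (size > EX_SRAM_SIZE - pool->free_offset)
		return EX_NULL_ADDR;		/* pool exhausted */

	p = pool->free_offset;
	pool->free_offset += size;		/* bump the cursor */
	return p;
}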
@ -97,13 +70,12 @@ cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
|
||||
*
|
||||
* @src: A pointer to array of words to set as consts.
|
||||
* @dst: The target SRAM buffer to set into
|
||||
* @nelements: The number of words in "src" array
|
||||
* @nelement: The number of words in "src" array
|
||||
* @seq: A pointer to the given IN/OUT descriptor sequence
|
||||
* @seq_len: A pointer to the given IN/OUT sequence length
|
||||
*/
|
||||
void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
|
||||
unsigned int nelement, struct cc_hw_desc *seq,
|
||||
unsigned int *seq_len)
|
||||
void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement,
|
||||
struct cc_hw_desc *seq, unsigned int *seq_len)
|
||||
{
|
||||
u32 i;
|
||||
unsigned int idx = *seq_len;
|
||||
|
@ -10,42 +10,30 @@
|
||||
|
||||
struct cc_drvdata;
|
||||
|
||||
#define NULL_SRAM_ADDR ((u32)-1)
|
||||
|
||||
/**
|
||||
* Address (offset) within CC internal SRAM
|
||||
*/
|
||||
|
||||
typedef u64 cc_sram_addr_t;
|
||||
|
||||
#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
|
||||
|
||||
/*!
|
||||
* Initializes SRAM pool.
|
||||
* cc_sram_mgr_init() - Initializes SRAM pool.
|
||||
* The first X bytes of SRAM are reserved for ROM usage, hence, pool
|
||||
* starts right after X bytes.
|
||||
*
|
||||
* \param drvdata
|
||||
* @drvdata: Associated device driver context
|
||||
*
|
||||
* \return int Zero for success, negative value otherwise.
|
||||
* Return:
|
||||
* Zero for success, negative value otherwise.
|
||||
*/
|
||||
int cc_sram_mgr_init(struct cc_drvdata *drvdata);
|
||||
|
||||
/*!
|
||||
* Uninits SRAM pool.
|
||||
/**
|
||||
* cc_sram_alloc() - Allocate buffer from SRAM pool.
|
||||
*
|
||||
* \param drvdata
|
||||
*/
|
||||
void cc_sram_mgr_fini(struct cc_drvdata *drvdata);
|
||||
|
||||
/*!
|
||||
* Allocated buffer from SRAM pool.
|
||||
* Note: Caller is responsible to free the LAST allocated buffer.
|
||||
* This function does not taking care of any fragmentation may occur
|
||||
* by the order of calls to alloc/free.
|
||||
* @drvdata: Associated device driver context
|
||||
* @size: The requested bytes to allocate
|
||||
*
|
||||
* \param drvdata
|
||||
* \param size The requested bytes to allocate
|
||||
* Return:
|
||||
* Address offset in SRAM or NULL_SRAM_ADDR for failure.
|
||||
*/
|
||||
cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
|
||||
u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
|
||||
|
||||
/**
|
||||
* cc_set_sram_desc() - Create const descriptors sequence to
|
||||
@ -54,12 +42,11 @@ cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
|
||||
*
|
||||
* @src: A pointer to array of words to set as consts.
|
||||
* @dst: The target SRAM buffer to set into
|
||||
* @nelements: The number of words in "src" array
|
||||
* @nelement: The number of words in "src" array
|
||||
* @seq: A pointer to the given IN/OUT descriptor sequence
|
||||
* @seq_len: A pointer to the given IN/OUT sequence length
|
||||
*/
|
||||
void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
|
||||
unsigned int nelement, struct cc_hw_desc *seq,
|
||||
unsigned int *seq_len);
|
||||
void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement,
|
||||
struct cc_hw_desc *seq, unsigned int *seq_len);
|
||||
|
||||
#endif /*__CC_SRAM_MGR_H__*/
|
||||
|
@ -715,6 +715,52 @@ static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
|
||||
return err;
|
||||
|
||||
}
|
||||
|
||||
static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		BUG();
		break;
	}
	return ret;
}

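get_qidxs() only reads back queue indices that each entry point now stores in its per-request context; the selection itself happens per request from the current CPU instead of being cached in the tfm context. A minimal sketch of that selection pattern, with illustrative names:

/* Sketch of per-request queue selection; names are illustrative. */
#include <linux/smp.h>
#include <linux/types.h>

struct ex_req_ctx {
	u16 txqidx;
	u16 rxqidx;
};

static void ex_pick_queues(struct ex_req_ctx *reqctx,
			   unsigned int ntxq, unsigned int nrxq)
{
	unsigned int cpu = get_cpu();	/* returns this CPU, disables preemption */

	reqctx->txqidx = cpu % ntxq;	/* spread requests across TX queues */
	reqctx->rxqidx = cpu % nrxq;	/* and across RX (response) queues */
	put_cpu();
}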
static inline void create_wreq(struct chcr_context *ctx,
|
||||
struct chcr_wr *chcr_req,
|
||||
struct crypto_async_request *req,
|
||||
@ -725,7 +771,15 @@ static inline void create_wreq(struct chcr_context *ctx,
|
||||
unsigned int lcb)
|
||||
{
|
||||
struct uld_ctx *u_ctx = ULD_CTX(ctx);
|
||||
int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
|
||||
unsigned int tx_channel_id, rx_channel_id;
|
||||
unsigned int txqidx = 0, rxqidx = 0;
|
||||
unsigned int qid, fid;
|
||||
|
||||
get_qidxs(req, &txqidx, &rxqidx);
|
||||
qid = u_ctx->lldi.rxq_ids[rxqidx];
|
||||
fid = u_ctx->lldi.rxq_ids[0];
|
||||
tx_channel_id = txqidx / ctx->txq_perchan;
|
||||
rx_channel_id = rxqidx / ctx->rxq_perchan;
|
||||
|
||||
|
||||
chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
|
||||
@ -734,15 +788,12 @@ static inline void create_wreq(struct chcr_context *ctx,
|
||||
chcr_req->wreq.len16_pkd =
|
||||
htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
|
||||
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
|
||||
chcr_req->wreq.rx_chid_to_rx_q_id =
|
||||
FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
|
||||
!!lcb, ctx->tx_qidx);
|
||||
chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
|
||||
!!lcb, txqidx);
|
||||
|
||||
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
|
||||
qid);
|
||||
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
|
||||
chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
|
||||
((sizeof(chcr_req->wreq)) >> 4)));
|
||||
|
||||
((sizeof(chcr_req->wreq)) >> 4)));
|
||||
chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
|
||||
chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
|
||||
sizeof(chcr_req->key_ctx) + sc_len);
|
||||
@ -758,7 +809,8 @@ static inline void create_wreq(struct chcr_context *ctx,
|
||||
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
|
||||
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
|
||||
struct chcr_context *ctx = c_ctx(tfm);
|
||||
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
|
||||
struct sk_buff *skb = NULL;
|
||||
struct chcr_wr *chcr_req;
|
||||
struct cpl_rx_phys_dsgl *phys_cpl;
|
||||
@ -771,7 +823,8 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
|
||||
unsigned int kctx_len;
|
||||
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
struct adapter *adap = padap(c_ctx(tfm)->dev);
|
||||
struct adapter *adap = padap(ctx->dev);
|
||||
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
|
||||
|
||||
nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
|
||||
reqctx->dst_ofst);
|
||||
@ -791,7 +844,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
|
||||
}
|
||||
chcr_req = __skb_put_zero(skb, transhdr_len);
|
||||
chcr_req->sec_cpl.op_ivinsrtofst =
|
||||
FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
|
||||
FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
|
||||
|
||||
chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
|
||||
chcr_req->sec_cpl.aadstart_cipherstop_hi =
|
||||
@ -1086,8 +1139,12 @@ static int chcr_final_cipher_iv(struct skcipher_request *req,
|
||||
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
|
||||
ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
|
||||
AES_BLOCK_SIZE));
|
||||
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
|
||||
ret = chcr_update_tweak(req, iv, 1);
|
||||
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
|
||||
if (!reqctx->partial_req)
|
||||
memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
|
||||
else
|
||||
ret = chcr_update_tweak(req, iv, 1);
|
||||
}
|
||||
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
|
||||
/*Already updated for Decrypt*/
|
||||
if (!reqctx->op)
|
||||
@ -1102,12 +1159,13 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
|
||||
unsigned char *input, int err)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chcr_context *ctx = c_ctx(tfm);
|
||||
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
|
||||
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
|
||||
struct sk_buff *skb;
|
||||
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
|
||||
struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
|
||||
struct cipher_wr_param wrparam;
|
||||
struct cipher_wr_param wrparam;
|
||||
struct chcr_dev *dev = c_ctx(tfm)->dev;
|
||||
int bytes;
|
||||
|
||||
@ -1152,7 +1210,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
|
||||
if (get_cryptoalg_subtype(tfm) ==
|
||||
CRYPTO_ALG_SUB_TYPE_CTR)
|
||||
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
|
||||
wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
|
||||
wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
|
||||
wrparam.req = req;
|
||||
wrparam.bytes = bytes;
|
||||
skb = create_cipher_wr(&wrparam);
|
||||
@ -1162,14 +1220,24 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
|
||||
goto unmap;
|
||||
}
|
||||
skb->dev = u_ctx->lldi.ports[0];
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
|
||||
chcr_send_wr(skb);
|
||||
reqctx->last_req_len = bytes;
|
||||
reqctx->processed += bytes;
|
||||
if (get_cryptoalg_subtype(tfm) ==
|
||||
CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP ) {
|
||||
complete(&ctx->cbc_aes_aio_done);
|
||||
}
|
||||
return 0;
|
||||
unmap:
|
||||
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
|
||||
complete:
|
||||
if (get_cryptoalg_subtype(tfm) ==
|
||||
CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP ) {
|
||||
complete(&ctx->cbc_aes_aio_done);
|
||||
}
|
||||
chcr_dec_wrcount(dev);
|
||||
req->base.complete(&req->base, err);
|
||||
return err;
|
||||
@ -1188,6 +1256,7 @@ static int process_cipher(struct skcipher_request *req,
|
||||
int bytes, err = -EINVAL;
|
||||
|
||||
reqctx->processed = 0;
|
||||
reqctx->partial_req = 0;
|
||||
if (!req->iv)
|
||||
goto error;
|
||||
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
|
||||
@ -1278,6 +1347,7 @@ static int process_cipher(struct skcipher_request *req,
|
||||
}
|
||||
reqctx->processed = bytes;
|
||||
reqctx->last_req_len = bytes;
|
||||
reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
|
||||
|
||||
return 0;
|
||||
unmap:
|
||||
@ -1289,31 +1359,43 @@ error:
|
||||
static int chcr_aes_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
|
||||
struct chcr_dev *dev = c_ctx(tfm)->dev;
|
||||
struct sk_buff *skb = NULL;
|
||||
int err, isfull = 0;
|
||||
int err;
|
||||
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
|
||||
struct chcr_context *ctx = c_ctx(tfm);
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
reqctx->txqidx = cpu % ctx->ntxq;
|
||||
reqctx->rxqidx = cpu % ctx->nrxq;
|
||||
put_cpu();
|
||||
|
||||
err = chcr_inc_wrcount(dev);
|
||||
if (err)
|
||||
return -ENXIO;
|
||||
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
|
||||
c_ctx(tfm)->tx_qidx))) {
|
||||
isfull = 1;
|
||||
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
|
||||
reqctx->txqidx) &&
|
||||
(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
|
||||
err = -ENOSPC;
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
|
||||
err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
|
||||
err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
|
||||
&skb, CHCR_ENCRYPT_OP);
|
||||
if (err || !skb)
|
||||
return err;
|
||||
skb->dev = u_ctx->lldi.ports[0];
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
|
||||
chcr_send_wr(skb);
|
||||
return isfull ? -EBUSY : -EINPROGRESS;
|
||||
if (get_cryptoalg_subtype(tfm) ==
|
||||
CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP ) {
|
||||
reqctx->partial_req = 1;
|
||||
wait_for_completion(&ctx->cbc_aes_aio_done);
|
||||
}
|
||||
return -EINPROGRESS;
|
||||
error:
|
||||
chcr_dec_wrcount(dev);
|
||||
return err;
|
||||
@ -1322,44 +1404,45 @@ error:
|
||||
static int chcr_aes_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
|
||||
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
|
||||
struct chcr_dev *dev = c_ctx(tfm)->dev;
|
||||
struct sk_buff *skb = NULL;
|
||||
int err, isfull = 0;
|
||||
int err;
|
||||
struct chcr_context *ctx = c_ctx(tfm);
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
reqctx->txqidx = cpu % ctx->ntxq;
|
||||
reqctx->rxqidx = cpu % ctx->nrxq;
|
||||
put_cpu();
|
||||
|
||||
err = chcr_inc_wrcount(dev);
|
||||
if (err)
|
||||
return -ENXIO;
|
||||
|
||||
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
|
||||
c_ctx(tfm)->tx_qidx))) {
|
||||
isfull = 1;
|
||||
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
|
||||
reqctx->txqidx) &&
|
||||
(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
|
||||
err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
|
||||
&skb, CHCR_DECRYPT_OP);
|
||||
if (err || !skb)
|
||||
return err;
|
||||
skb->dev = u_ctx->lldi.ports[0];
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
|
||||
chcr_send_wr(skb);
|
||||
return isfull ? -EBUSY : -EINPROGRESS;
|
||||
return -EINPROGRESS;
|
||||
}
|
||||
|
||||
static int chcr_device_init(struct chcr_context *ctx)
|
||||
{
|
||||
struct uld_ctx *u_ctx = NULL;
|
||||
unsigned int id;
|
||||
int txq_perchan, txq_idx, ntxq;
|
||||
int err = 0, rxq_perchan, rxq_idx;
|
||||
int txq_perchan, ntxq;
|
||||
int err = 0, rxq_perchan;
|
||||
|
||||
id = smp_processor_id();
|
||||
if (!ctx->dev) {
|
||||
u_ctx = assign_chcr_device();
|
||||
if (!u_ctx) {
|
||||
err = -ENXIO;
|
||||
pr_err("chcr device assignment fails\n");
|
||||
goto out;
|
||||
}
|
||||
@ -1367,23 +1450,10 @@ static int chcr_device_init(struct chcr_context *ctx)
|
||||
ntxq = u_ctx->lldi.ntxq;
|
||||
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
|
||||
txq_perchan = ntxq / u_ctx->lldi.nchan;
|
||||
spin_lock(&ctx->dev->lock_chcr_dev);
|
||||
ctx->tx_chan_id = ctx->dev->tx_channel_id;
|
||||
ctx->dev->tx_channel_id =
|
||||
(ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
|
||||
spin_unlock(&ctx->dev->lock_chcr_dev);
|
||||
rxq_idx = ctx->tx_chan_id * rxq_perchan;
|
||||
rxq_idx += id % rxq_perchan;
|
||||
txq_idx = ctx->tx_chan_id * txq_perchan;
|
||||
txq_idx += id % txq_perchan;
|
||||
ctx->rx_qidx = rxq_idx;
|
||||
ctx->tx_qidx = txq_idx;
|
||||
/* Channel Id used by SGE to forward packet to Host.
|
||||
* Same value should be used in cpl_fw6_pld RSS_CH field
|
||||
* by FW. Driver programs PCI channel ID to be used in fw
|
||||
* at the time of queue allocation with value "pi->tx_chan"
|
||||
*/
|
||||
ctx->pci_chan_id = txq_idx / txq_perchan;
|
||||
ctx->ntxq = ntxq;
|
||||
ctx->nrxq = u_ctx->lldi.nrxq;
|
||||
ctx->rxq_perchan = rxq_perchan;
|
||||
ctx->txq_perchan = txq_perchan;
|
||||
}
|
||||
out:
|
||||
return err;
|
||||
@ -1401,7 +1471,7 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm)
|
||||
pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
|
||||
return PTR_ERR(ablkctx->sw_cipher);
|
||||
}
|
||||
|
||||
init_completion(&ctx->cbc_aes_aio_done);
|
||||
crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
|
||||
|
||||
return chcr_device_init(ctx);
|
||||
@ -1485,9 +1555,10 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
|
||||
{
|
||||
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
|
||||
struct chcr_context *ctx = h_ctx(tfm);
|
||||
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
|
||||
struct sk_buff *skb = NULL;
|
||||
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
|
||||
struct uld_ctx *u_ctx = ULD_CTX(ctx);
|
||||
struct chcr_wr *chcr_req;
|
||||
struct ulptx_sgl *ulptx;
|
||||
unsigned int nents = 0, transhdr_len;
|
||||
@ -1496,6 +1567,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
|
||||
GFP_ATOMIC;
|
||||
struct adapter *adap = padap(h_ctx(tfm)->dev);
|
||||
int error = 0;
|
||||
unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
|
||||
|
||||
transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
|
||||
req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
|
||||
@ -1513,7 +1585,8 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
|
||||
chcr_req = __skb_put_zero(skb, transhdr_len);
|
||||
|
||||
chcr_req->sec_cpl.op_ivinsrtofst =
|
||||
FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
|
||||
FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
|
||||
|
||||
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
|
||||
|
||||
chcr_req->sec_cpl.aadstart_cipherstop_hi =
|
||||
@ -1576,16 +1649,22 @@ static int chcr_ahash_update(struct ahash_request *req)
|
||||
{
|
||||
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
|
||||
struct uld_ctx *u_ctx = NULL;
|
||||
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
|
||||
struct chcr_context *ctx = h_ctx(rtfm);
|
||||
struct chcr_dev *dev = h_ctx(rtfm)->dev;
|
||||
struct sk_buff *skb;
|
||||
u8 remainder = 0, bs;
|
||||
unsigned int nbytes = req->nbytes;
|
||||
struct hash_wr_param params;
|
||||
int error, isfull = 0;
|
||||
int error;
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
req_ctx->txqidx = cpu % ctx->ntxq;
|
||||
req_ctx->rxqidx = cpu % ctx->nrxq;
|
||||
put_cpu();
|
||||
|
||||
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
|
||||
u_ctx = ULD_CTX(h_ctx(rtfm));
|
||||
|
||||
if (nbytes + req_ctx->reqlen >= bs) {
|
||||
remainder = (nbytes + req_ctx->reqlen) % bs;
|
||||
@ -1603,12 +1682,10 @@ static int chcr_ahash_update(struct ahash_request *req)
|
||||
* inflight count for dev guarantees that lldi and padap is valid
|
||||
*/
|
||||
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
|
||||
h_ctx(rtfm)->tx_qidx))) {
|
||||
isfull = 1;
|
||||
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
|
||||
req_ctx->txqidx) &&
|
||||
(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
|
||||
error = -ENOSPC;
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
chcr_init_hctx_per_wr(req_ctx);
|
||||
@ -1650,10 +1727,9 @@ static int chcr_ahash_update(struct ahash_request *req)
|
||||
}
|
||||
req_ctx->reqlen = remainder;
|
||||
skb->dev = u_ctx->lldi.ports[0];
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
|
||||
chcr_send_wr(skb);
|
||||
|
||||
return isfull ? -EBUSY : -EINPROGRESS;
|
||||
return -EINPROGRESS;
|
||||
unmap:
|
||||
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
|
||||
err:
|
||||
@ -1678,16 +1754,22 @@ static int chcr_ahash_final(struct ahash_request *req)
|
||||
struct chcr_dev *dev = h_ctx(rtfm)->dev;
|
||||
struct hash_wr_param params;
|
||||
struct sk_buff *skb;
|
||||
struct uld_ctx *u_ctx = NULL;
|
||||
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
|
||||
struct chcr_context *ctx = h_ctx(rtfm);
|
||||
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
|
||||
int error = -EINVAL;
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
req_ctx->txqidx = cpu % ctx->ntxq;
|
||||
req_ctx->rxqidx = cpu % ctx->nrxq;
|
||||
put_cpu();
|
||||
|
||||
error = chcr_inc_wrcount(dev);
|
||||
if (error)
|
||||
return -ENXIO;
|
||||
|
||||
chcr_init_hctx_per_wr(req_ctx);
|
||||
u_ctx = ULD_CTX(h_ctx(rtfm));
|
||||
if (is_hmac(crypto_ahash_tfm(rtfm)))
|
||||
params.opad_needed = 1;
|
||||
else
|
||||
@ -1727,7 +1809,7 @@ static int chcr_ahash_final(struct ahash_request *req)
|
||||
}
|
||||
req_ctx->reqlen = 0;
|
||||
skb->dev = u_ctx->lldi.ports[0];
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
|
||||
chcr_send_wr(skb);
|
||||
return -EINPROGRESS;
|
||||
err:
|
||||
@ -1740,25 +1822,29 @@ static int chcr_ahash_finup(struct ahash_request *req)
|
||||
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
|
||||
struct chcr_dev *dev = h_ctx(rtfm)->dev;
|
||||
struct uld_ctx *u_ctx = NULL;
|
||||
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
|
||||
struct chcr_context *ctx = h_ctx(rtfm);
|
||||
struct sk_buff *skb;
|
||||
struct hash_wr_param params;
|
||||
u8 bs;
|
||||
int error, isfull = 0;
|
||||
int error;
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
req_ctx->txqidx = cpu % ctx->ntxq;
|
||||
req_ctx->rxqidx = cpu % ctx->nrxq;
|
||||
put_cpu();
|
||||
|
||||
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
|
||||
u_ctx = ULD_CTX(h_ctx(rtfm));
|
||||
error = chcr_inc_wrcount(dev);
|
||||
if (error)
|
||||
return -ENXIO;
|
||||
|
||||
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
|
||||
h_ctx(rtfm)->tx_qidx))) {
|
||||
isfull = 1;
|
||||
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
|
||||
req_ctx->txqidx) &&
|
||||
(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
|
||||
error = -ENOSPC;
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
chcr_init_hctx_per_wr(req_ctx);
|
||||
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
|
||||
@ -1816,10 +1902,9 @@ static int chcr_ahash_finup(struct ahash_request *req)
|
||||
req_ctx->reqlen = 0;
|
||||
req_ctx->hctx_wr.processed += params.sg_len;
|
||||
skb->dev = u_ctx->lldi.ports[0];
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
|
||||
chcr_send_wr(skb);
|
||||
|
||||
return isfull ? -EBUSY : -EINPROGRESS;
|
||||
return -EINPROGRESS;
|
||||
unmap:
|
||||
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
|
||||
err:
|
||||
@ -1832,11 +1917,18 @@ static int chcr_ahash_digest(struct ahash_request *req)
|
||||
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
|
||||
struct chcr_dev *dev = h_ctx(rtfm)->dev;
|
||||
struct uld_ctx *u_ctx = NULL;
|
||||
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
|
||||
struct chcr_context *ctx = h_ctx(rtfm);
|
||||
struct sk_buff *skb;
|
||||
struct hash_wr_param params;
|
||||
u8 bs;
|
||||
int error, isfull = 0;
|
||||
int error;
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
req_ctx->txqidx = cpu % ctx->ntxq;
|
||||
req_ctx->rxqidx = cpu % ctx->nrxq;
|
||||
put_cpu();
|
||||
|
||||
rtfm->init(req);
|
||||
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
|
||||
@ -1844,14 +1936,11 @@ static int chcr_ahash_digest(struct ahash_request *req)
|
||||
if (error)
|
||||
return -ENXIO;
|
||||
|
||||
u_ctx = ULD_CTX(h_ctx(rtfm));
|
||||
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
|
||||
h_ctx(rtfm)->tx_qidx))) {
|
||||
isfull = 1;
|
||||
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
|
||||
req_ctx->txqidx) &&
|
||||
(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
|
||||
error = -ENOSPC;
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
chcr_init_hctx_per_wr(req_ctx);
|
||||
@ -1907,9 +1996,9 @@ static int chcr_ahash_digest(struct ahash_request *req)
|
||||
}
|
||||
req_ctx->hctx_wr.processed += params.sg_len;
|
||||
skb->dev = u_ctx->lldi.ports[0];
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
|
||||
chcr_send_wr(skb);
|
||||
return isfull ? -EBUSY : -EINPROGRESS;
|
||||
return -EINPROGRESS;
|
||||
unmap:
|
||||
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
|
||||
err:
|
||||
@ -1922,14 +2011,20 @@ static int chcr_ahash_continue(struct ahash_request *req)
|
||||
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
|
||||
struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
|
||||
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
|
||||
struct uld_ctx *u_ctx = NULL;
|
||||
struct chcr_context *ctx = h_ctx(rtfm);
|
||||
struct uld_ctx *u_ctx = ULD_CTX(ctx);
|
||||
struct sk_buff *skb;
|
||||
struct hash_wr_param params;
|
||||
u8 bs;
|
||||
int error;
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
reqctx->txqidx = cpu % ctx->ntxq;
|
||||
reqctx->rxqidx = cpu % ctx->nrxq;
|
||||
put_cpu();
|
||||
|
||||
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
|
||||
u_ctx = ULD_CTX(h_ctx(rtfm));
|
||||
get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm));
|
||||
params.kctx_len = roundup(params.alg_prm.result_size, 16);
|
||||
if (is_hmac(crypto_ahash_tfm(rtfm))) {
|
||||
@ -1969,7 +2064,7 @@ static int chcr_ahash_continue(struct ahash_request *req)
|
||||
}
|
||||
hctx_wr->processed += params.sg_len;
|
||||
skb->dev = u_ctx->lldi.ports[0];
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
|
||||
chcr_send_wr(skb);
|
||||
return 0;
|
||||
err:
|
||||
@ -2315,7 +2410,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
|
||||
int size)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
|
||||
struct chcr_context *ctx = a_ctx(tfm);
|
||||
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
|
||||
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
|
||||
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
|
||||
struct sk_buff *skb = NULL;
|
||||
@ -2331,7 +2427,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
|
||||
int null = 0;
|
||||
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
|
||||
GFP_ATOMIC;
|
||||
struct adapter *adap = padap(a_ctx(tfm)->dev);
|
||||
struct adapter *adap = padap(ctx->dev);
|
||||
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
|
||||
|
||||
if (req->cryptlen == 0)
|
||||
return NULL;
|
||||
@ -2351,7 +2448,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
|
||||
snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
|
||||
CHCR_SRC_SG_SIZE, 0);
|
||||
dst_size = get_space_for_phys_dsgl(dnents);
|
||||
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
|
||||
kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
|
||||
- sizeof(chcr_req->key_ctx);
|
||||
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
|
||||
reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
|
||||
@ -2383,7 +2480,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
|
||||
* to the hardware spec
|
||||
*/
|
||||
chcr_req->sec_cpl.op_ivinsrtofst =
|
||||
FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
|
||||
FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
|
||||
chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
|
||||
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
|
||||
null ? 0 : 1 + IV,
|
||||
@ -2471,8 +2568,9 @@ int chcr_aead_dma_map(struct device *dev,
|
||||
else
|
||||
reqctx->b0_dma = 0;
|
||||
if (req->src == req->dst) {
|
||||
error = dma_map_sg(dev, req->src, sg_nents(req->src),
|
||||
DMA_BIDIRECTIONAL);
|
||||
error = dma_map_sg(dev, req->src,
|
||||
sg_nents_for_len(req->src, dst_size),
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!error)
|
||||
goto err;
|
||||
} else {
|
||||
@ -2558,13 +2656,14 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
|
||||
unsigned int authsize = crypto_aead_authsize(tfm);
|
||||
struct chcr_context *ctx = a_ctx(tfm);
|
||||
u32 temp;
|
||||
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
|
||||
|
||||
dsgl_walk_init(&dsgl_walk, phys_cpl);
|
||||
dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
|
||||
temp = req->assoclen + req->cryptlen +
|
||||
(reqctx->op ? -authsize : authsize);
|
||||
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
|
||||
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
|
||||
dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
|
||||
}
|
||||
|
||||
void chcr_add_cipher_src_ent(struct skcipher_request *req,
|
||||
@ -2599,14 +2698,14 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
|
||||
struct chcr_context *ctx = c_ctx(tfm);
|
||||
struct dsgl_walk dsgl_walk;
|
||||
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
|
||||
|
||||
dsgl_walk_init(&dsgl_walk, phys_cpl);
|
||||
dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
|
||||
reqctx->dst_ofst);
|
||||
reqctx->dstsg = dsgl_walk.last_sg;
|
||||
reqctx->dst_ofst = dsgl_walk.last_sg_len;
|
||||
|
||||
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
|
||||
dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
|
||||
}
|
||||
|
||||
void chcr_add_hash_src_ent(struct ahash_request *req,
|
||||
@ -2804,10 +2903,12 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
|
||||
unsigned short op_type)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
|
||||
struct chcr_context *ctx = a_ctx(tfm);
|
||||
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
|
||||
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
|
||||
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
|
||||
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
|
||||
unsigned int c_id = a_ctx(tfm)->tx_chan_id;
|
||||
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
|
||||
unsigned int ccm_xtra;
|
||||
unsigned char tag_offset = 0, auth_offset = 0;
|
||||
unsigned int assoclen;
|
||||
@ -2828,9 +2929,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
|
||||
auth_offset = 0;
|
||||
}
|
||||
|
||||
|
||||
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
|
||||
2, 1);
|
||||
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
|
||||
sec_cpl->pldlen =
|
||||
htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
|
||||
/* For CCM there wil be b0 always. So AAD start will be 1 always */
|
||||
@ -2973,7 +3072,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
|
||||
int size)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
|
||||
struct chcr_context *ctx = a_ctx(tfm);
|
||||
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
|
||||
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
|
||||
struct sk_buff *skb = NULL;
|
||||
struct chcr_wr *chcr_req;
|
||||
@ -2986,7 +3086,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
|
||||
u8 *ivptr;
|
||||
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
|
||||
GFP_ATOMIC;
|
||||
struct adapter *adap = padap(a_ctx(tfm)->dev);
|
||||
struct adapter *adap = padap(ctx->dev);
|
||||
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
|
||||
|
||||
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
|
||||
assoclen = req->assoclen - 8;
|
||||
@ -3028,7 +3129,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
|
||||
//Offset of tag from end
|
||||
temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
|
||||
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
|
||||
a_ctx(tfm)->tx_chan_id, 2, 1);
|
||||
rx_channel_id, 2, 1);
|
||||
chcr_req->sec_cpl.pldlen =
|
||||
htonl(req->assoclen + IV + req->cryptlen);
|
||||
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
|
||||
@ -3576,9 +3677,9 @@ static int chcr_aead_op(struct aead_request *req,
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
|
||||
struct uld_ctx *u_ctx;
|
||||
struct chcr_context *ctx = a_ctx(tfm);
|
||||
struct uld_ctx *u_ctx = ULD_CTX(ctx);
|
||||
struct sk_buff *skb;
|
||||
int isfull = 0;
|
||||
struct chcr_dev *cdev;
|
||||
|
||||
cdev = a_ctx(tfm)->dev;
|
||||
@ -3594,18 +3695,15 @@ static int chcr_aead_op(struct aead_request *req,
|
||||
return chcr_aead_fallback(req, reqctx->op);
|
||||
}
|
||||
|
||||
u_ctx = ULD_CTX(a_ctx(tfm));
|
||||
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
|
||||
a_ctx(tfm)->tx_qidx)) {
|
||||
isfull = 1;
|
||||
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
|
||||
reqctx->txqidx) &&
|
||||
(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
|
||||
chcr_dec_wrcount(cdev);
|
||||
return -ENOSPC;
|
||||
}
|
||||
}
|
||||
|
||||
/* Form a WR from req */
|
||||
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
|
||||
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
|
||||
|
||||
if (IS_ERR_OR_NULL(skb)) {
|
||||
chcr_dec_wrcount(cdev);
|
||||
@ -3613,15 +3711,22 @@ static int chcr_aead_op(struct aead_request *req,
|
||||
}
|
||||
|
||||
skb->dev = u_ctx->lldi.ports[0];
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
|
||||
set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
|
||||
chcr_send_wr(skb);
|
||||
return isfull ? -EBUSY : -EINPROGRESS;
|
||||
return -EINPROGRESS;
|
||||
}
|
||||
|
||||
static int chcr_aead_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
|
||||
struct chcr_context *ctx = a_ctx(tfm);
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
reqctx->txqidx = cpu % ctx->ntxq;
|
||||
reqctx->rxqidx = cpu % ctx->nrxq;
|
||||
put_cpu();
|
||||
|
||||
reqctx->verify = VERIFY_HW;
|
||||
reqctx->op = CHCR_ENCRYPT_OP;
|
||||
@ -3643,9 +3748,16 @@ static int chcr_aead_encrypt(struct aead_request *req)
|
||||
static int chcr_aead_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
|
||||
struct chcr_context *ctx = a_ctx(tfm);
|
||||
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
|
||||
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
|
||||
int size;
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
reqctx->txqidx = cpu % ctx->ntxq;
|
||||
reqctx->rxqidx = cpu % ctx->nrxq;
|
||||
put_cpu();
|
||||
|
||||
if (aeadctx->mayverify == VERIFY_SW) {
|
||||
size = crypto_aead_maxauthsize(tfm);
|
||||
|
@ -195,6 +195,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
|
||||
struct uld_ctx *u_ctx;
|
||||
|
||||
/* Create the device and add it in the device list */
|
||||
pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
|
||||
if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
@ -287,6 +288,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
|
||||
|
||||
case CXGB4_STATE_DETACH:
|
||||
chcr_detach_device(u_ctx);
|
||||
if (!atomic_read(&drv_data.dev_count))
|
||||
stop_crypto();
|
||||
break;
|
||||
|
||||
case CXGB4_STATE_START_RECOVERY:
|
||||
|
@ -43,7 +43,8 @@
|
||||
#include "cxgb4_uld.h"
|
||||
|
||||
#define DRV_MODULE_NAME "chcr"
|
||||
#define DRV_VERSION "1.0.0.0"
|
||||
#define DRV_VERSION "1.0.0.0-ko"
|
||||
#define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"
|
||||
|
||||
#define MAX_PENDING_REQ_TO_HW 20
|
||||
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
|
||||
@ -67,7 +68,7 @@ struct _key_ctx {
|
||||
__be32 ctx_hdr;
|
||||
u8 salt[MAX_SALT];
|
||||
__be64 iv_to_auth;
|
||||
unsigned char key[0];
|
||||
unsigned char key[];
|
||||
};
|
||||
|
||||
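The key[0] to key[] change above converts a GCC zero-length array into a C99 flexible array member; the allocation size is still computed explicitly by the caller. A hedged sketch of the allocation pattern (struct layout and sizes are illustrative, not the chcr key context):

/* Illustrative only: allocating a struct with a trailing flexible array. */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct ex_key_ctx {
	__be32 ctx_hdr;
	u8 salt[8];
	unsigned char key[];	/* flexible array member, no [0] */
};

static struct ex_key_ctx *ex_alloc_key_ctx(unsigned int keylen)
{
	struct ex_key_ctx *kctx;

	/* struct_size() = sizeof(*kctx) + keylen * sizeof(kctx->key[0]),
	 * with overflow checking; kctx is only used inside sizeof here. */
	kctx = kzalloc(struct_size(kctx, key, keylen), GFP_KERNEL);
	return kctx;
}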
#define KEYCTX_TX_WR_IV_S 55
|
||||
@ -147,7 +148,6 @@ struct chcr_dev {
|
||||
int wqretry;
|
||||
struct delayed_work detach_work;
|
||||
struct completion detach_comp;
|
||||
unsigned char tx_channel_id;
|
||||
};
|
||||
|
||||
struct uld_ctx {
|
||||
|
@ -187,6 +187,8 @@ struct chcr_aead_reqctx {
|
||||
unsigned int op;
|
||||
u16 imm;
|
||||
u16 verify;
|
||||
u16 txqidx;
|
||||
u16 rxqidx;
|
||||
u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
|
||||
u8 *scratch_pad;
|
||||
};
|
||||
@ -250,10 +252,11 @@ struct __crypto_ctx {
|
||||
|
||||
struct chcr_context {
|
||||
struct chcr_dev *dev;
|
||||
unsigned char tx_qidx;
|
||||
unsigned char rx_qidx;
|
||||
unsigned char tx_chan_id;
|
||||
unsigned char pci_chan_id;
|
||||
unsigned char rxq_perchan;
|
||||
unsigned char txq_perchan;
|
||||
unsigned int ntxq;
|
||||
unsigned int nrxq;
|
||||
struct completion cbc_aes_aio_done;
|
||||
struct __crypto_ctx crypto_ctx[0];
|
||||
};
|
||||
|
||||
@ -279,6 +282,8 @@ struct chcr_ahash_req_ctx {
|
||||
u8 *skbfr;
|
||||
/* SKB which is being sent to the hardware for processing */
|
||||
u64 data_len; /* Data len till time */
|
||||
u16 txqidx;
|
||||
u16 rxqidx;
|
||||
u8 reqlen;
|
||||
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
|
||||
u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
|
||||
@ -290,12 +295,15 @@ struct chcr_skcipher_req_ctx {
|
||||
struct scatterlist *dstsg;
|
||||
unsigned int processed;
|
||||
unsigned int last_req_len;
|
||||
unsigned int partial_req;
|
||||
struct scatterlist *srcsg;
|
||||
unsigned int src_ofst;
|
||||
unsigned int dst_ofst;
|
||||
unsigned int op;
|
||||
u16 imm;
|
||||
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
|
||||
u16 txqidx;
|
||||
u16 rxqidx;
|
||||
};
|
||||
|
||||
struct chcr_alg_template {
|
||||
|
@ -1110,10 +1110,10 @@ new_buf:
|
||||
pg_size = page_size(page);
|
||||
if (off < pg_size &&
|
||||
skb_can_coalesce(skb, i, page, off)) {
|
||||
merge = 1;
|
||||
merge = true;
|
||||
goto copy;
|
||||
}
|
||||
merge = 0;
|
||||
merge = false;
|
||||
if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) :
|
||||
MAX_SKB_FRAGS))
|
||||
goto new_buf;
|
||||
@ -1428,6 +1428,8 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
|
||||
{
|
||||
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
|
||||
struct chtls_hws *hws = &csk->tlshws;
|
||||
struct net_device *dev = csk->egress_dev;
|
||||
struct adapter *adap = netdev2adap(dev);
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
unsigned long avail;
|
||||
int buffers_freed;
|
||||
@ -1585,6 +1587,7 @@ skip_copy:
|
||||
tp->copied_seq += skb->len;
|
||||
hws->rcvpld = skb->hdr_len;
|
||||
} else {
|
||||
atomic_inc(&adap->chcr_stats.tls_pdu_rx);
|
||||
tp->copied_seq += hws->rcvpld;
|
||||
}
|
||||
chtls_free_skb(sk, skb);
|
||||
|
@ -174,9 +174,16 @@ static inline void chtls_dev_release(struct kref *kref)
|
||||
{
|
||||
struct tls_toe_device *dev;
|
||||
struct chtls_dev *cdev;
|
||||
struct adapter *adap;
|
||||
|
||||
dev = container_of(kref, struct tls_toe_device, kref);
|
||||
cdev = to_chtls_dev(dev);
|
||||
|
||||
/* Reset tls rx/tx stats */
|
||||
adap = pci_get_drvdata(cdev->pdev);
|
||||
atomic_set(&adap->chcr_stats.tls_pdu_tx, 0);
|
||||
atomic_set(&adap->chcr_stats.tls_pdu_rx, 0);
|
||||
|
||||
chtls_free_uld(cdev);
|
||||
}
|
||||
|
||||
@ -229,8 +236,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info)
|
||||
struct chtls_dev *cdev;
|
||||
int i, j;
|
||||
|
||||
cdev = kzalloc(sizeof(*cdev) + info->nports *
|
||||
(sizeof(struct net_device *)), GFP_KERNEL);
|
||||
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
|
||||
if (!cdev)
|
||||
goto out;
|
||||
|
||||
|
@ -40,6 +40,7 @@ config CRYPTO_DEV_HISI_QM
|
||||
tristate
|
||||
depends on ARM64 || COMPILE_TEST
|
||||
depends on PCI && PCI_MSI
|
||||
depends on UACCE || UACCE=n
|
||||
help
|
||||
HiSilicon accelerator engines use a common queue management
|
||||
interface. Specific engine driver may use this module.
|
||||
@ -49,6 +50,7 @@ config CRYPTO_DEV_HISI_ZIP
|
||||
depends on PCI && PCI_MSI
|
||||
depends on ARM64 || (COMPILE_TEST && 64BIT)
|
||||
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
|
||||
depends on UACCE || UACCE=n
|
||||
select CRYPTO_DEV_HISI_QM
|
||||
help
|
||||
Support for HiSilicon ZIP Driver
|
||||
|
@ -46,7 +46,6 @@ struct hpre_debug {
|
||||
|
||||
struct hpre {
|
||||
struct hisi_qm qm;
|
||||
struct list_head list;
|
||||
struct hpre_debug debug;
|
||||
u32 num_vfs;
|
||||
unsigned long status;
|
||||
@ -76,7 +75,7 @@ struct hpre_sqe {
|
||||
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
|
||||
};
|
||||
|
||||
struct hpre *hpre_find_device(int node);
|
||||
struct hisi_qp *hpre_create_qp(void);
|
||||
int hpre_algs_register(void);
|
||||
void hpre_algs_unregister(void);
|
||||
|
||||
|
@@ -147,26 +147,18 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
static struct hisi_qp *hpre_get_qp_and_start(void)
{
	struct hisi_qp *qp;
	struct hpre *hpre;
	int ret;

	/* find the proper hpre device, which is near the current CPU core */
	hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
	if (!hpre) {
		pr_err("Can not find proper hpre device!\n");
		return ERR_PTR(-ENODEV);
	}

	qp = hisi_qm_create_qp(&hpre->qm, 0);
	if (IS_ERR(qp)) {
		pci_err(hpre->qm.pdev, "Can not create qp!\n");
	qp = hpre_create_qp();
	if (!qp) {
		pr_err("Can not create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_release_qp(qp);
		pci_err(hpre->qm.pdev, "Can not start qp!\n");
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

@@ -338,7 +330,7 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
|
||||
if (is_clear_all) {
|
||||
idr_destroy(&ctx->req_idr);
|
||||
kfree(ctx->req_list);
|
||||
hisi_qm_release_qp(ctx->qp);
|
||||
hisi_qm_free_qps(&ctx->qp, 1);
|
||||
}
|
||||
|
||||
ctx->crt_g2_mode = false;
|
||||
|
@@ -82,8 +82,7 @@
|
||||
|
||||
#define HPRE_VIA_MSI_DSM 1
|
||||
|
||||
static LIST_HEAD(hpre_list);
|
||||
static DEFINE_MUTEX(hpre_list_lock);
|
||||
static struct hisi_qm_list hpre_devices;
|
||||
static const char hpre_name[] = "hisi_hpre";
|
||||
static struct dentry *hpre_debugfs_root;
|
||||
static const struct pci_device_id hpre_dev_ids[] = {
|
||||
@@ -196,43 +195,17 @@ static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
|
||||
module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
|
||||
MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)");
|
||||
|
||||
static inline void hpre_add_to_list(struct hpre *hpre)
|
||||
struct hisi_qp *hpre_create_qp(void)
|
||||
{
|
||||
mutex_lock(&hpre_list_lock);
|
||||
list_add_tail(&hpre->list, &hpre_list);
|
||||
mutex_unlock(&hpre_list_lock);
|
||||
}
|
||||
int node = cpu_to_node(smp_processor_id());
|
||||
struct hisi_qp *qp = NULL;
|
||||
int ret;
|
||||
|
||||
static inline void hpre_remove_from_list(struct hpre *hpre)
|
||||
{
|
||||
mutex_lock(&hpre_list_lock);
|
||||
list_del(&hpre->list);
|
||||
mutex_unlock(&hpre_list_lock);
|
||||
}
|
||||
ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
|
||||
if (!ret)
|
||||
return qp;
|
||||
|
||||
struct hpre *hpre_find_device(int node)
|
||||
{
|
||||
struct hpre *hpre, *ret = NULL;
|
||||
int min_distance = INT_MAX;
|
||||
struct device *dev;
|
||||
int dev_node = 0;
|
||||
|
||||
mutex_lock(&hpre_list_lock);
|
||||
list_for_each_entry(hpre, &hpre_list, list) {
|
||||
dev = &hpre->qm.pdev->dev;
|
||||
#ifdef CONFIG_NUMA
|
||||
dev_node = dev->numa_node;
|
||||
if (dev_node < 0)
|
||||
dev_node = 0;
|
||||
#endif
|
||||
if (node_distance(dev_node, node) < min_distance) {
|
||||
ret = hpre;
|
||||
min_distance = node_distance(dev_node, node);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&hpre_list_lock);
|
||||
|
||||
return ret;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
|
||||
@@ -349,18 +322,14 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
|
||||
hisi_qm_debug_regs_clear(qm);
|
||||
}
|
||||
|
||||
static void hpre_hw_error_disable(struct hpre *hpre)
|
||||
static void hpre_hw_error_disable(struct hisi_qm *qm)
|
||||
{
|
||||
struct hisi_qm *qm = &hpre->qm;
|
||||
|
||||
/* disable hpre hw error interrupts */
|
||||
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
|
||||
}
|
||||
|
||||
static void hpre_hw_error_enable(struct hpre *hpre)
|
||||
static void hpre_hw_error_enable(struct hisi_qm *qm)
|
||||
{
|
||||
struct hisi_qm *qm = &hpre->qm;
|
||||
|
||||
/* enable hpre hw error interrupts */
|
||||
writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
|
||||
writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
|
||||
@@ -713,13 +682,39 @@ static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hpre_hw_err_init(struct hpre *hpre)
|
||||
static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
|
||||
{
|
||||
hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE,
|
||||
0, QM_DB_RANDOM_INVALID);
|
||||
hpre_hw_error_enable(hpre);
|
||||
const struct hpre_hw_error *err = hpre_hw_errors;
|
||||
struct device *dev = &qm->pdev->dev;
|
||||
|
||||
while (err->msg) {
|
||||
if (err->int_msk & err_sts)
|
||||
dev_warn(dev, "%s [error status=0x%x] found\n",
|
||||
err->msg, err->int_msk);
|
||||
err++;
|
||||
}
|
||||
|
||||
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
|
||||
}
|
||||
|
||||
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
|
||||
{
|
||||
return readl(qm->io_base + HPRE_HAC_INT_STATUS);
|
||||
}
|
||||
|
||||
static const struct hisi_qm_err_ini hpre_err_ini = {
|
||||
.hw_err_enable = hpre_hw_error_enable,
|
||||
.hw_err_disable = hpre_hw_error_disable,
|
||||
.get_dev_hw_err_status = hpre_get_hw_err_status,
|
||||
.log_dev_hw_err = hpre_log_hw_error,
|
||||
.err_info = {
|
||||
.ce = QM_BASE_CE,
|
||||
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
|
||||
.fe = 0,
|
||||
.msi = QM_DB_RANDOM_INVALID,
|
||||
}
|
||||
};
|
||||
|
||||
static int hpre_pf_probe_init(struct hpre *hpre)
|
||||
{
|
||||
struct hisi_qm *qm = &hpre->qm;
|
||||
@@ -731,7 +726,8 @@ static int hpre_pf_probe_init(struct hpre *hpre)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hpre_hw_err_init(hpre);
|
||||
qm->err_ini = &hpre_err_ini;
|
||||
hisi_qm_dev_err_init(qm);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -776,22 +772,21 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
if (ret)
|
||||
dev_warn(&pdev->dev, "init debugfs fail!\n");
|
||||
|
||||
hpre_add_to_list(hpre);
|
||||
hisi_qm_add_to_list(qm, &hpre_devices);
|
||||
|
||||
ret = hpre_algs_register();
|
||||
if (ret < 0) {
|
||||
hpre_remove_from_list(hpre);
|
||||
pci_err(pdev, "fail to register algs to crypto!\n");
|
||||
goto err_with_qm_start;
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_with_qm_start:
|
||||
hisi_qm_del_from_list(qm, &hpre_devices);
|
||||
hisi_qm_stop(qm);
|
||||
|
||||
err_with_err_init:
|
||||
if (pdev->is_physfn)
|
||||
hpre_hw_error_disable(hpre);
|
||||
hisi_qm_dev_err_uninit(qm);
|
||||
|
||||
err_with_qm_init:
|
||||
hisi_qm_uninit(qm);
|
||||
@@ -907,7 +902,7 @@ static void hpre_remove(struct pci_dev *pdev)
|
||||
int ret;
|
||||
|
||||
hpre_algs_unregister();
|
||||
hpre_remove_from_list(hpre);
|
||||
hisi_qm_del_from_list(qm, &hpre_devices);
|
||||
if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
|
||||
ret = hpre_sriov_disable(pdev);
|
||||
if (ret) {
|
||||
@@ -922,69 +917,13 @@ static void hpre_remove(struct pci_dev *pdev)
|
||||
|
||||
hpre_debugfs_exit(hpre);
|
||||
hisi_qm_stop(qm);
|
||||
if (qm->fun_type == QM_HW_PF)
|
||||
hpre_hw_error_disable(hpre);
|
||||
hisi_qm_dev_err_uninit(qm);
|
||||
hisi_qm_uninit(qm);
|
||||
}
|
||||
|
||||
static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts)
|
||||
{
|
||||
const struct hpre_hw_error *err = hpre_hw_errors;
|
||||
struct device *dev = &hpre->qm.pdev->dev;
|
||||
|
||||
while (err->msg) {
|
||||
if (err->int_msk & err_sts)
|
||||
dev_warn(dev, "%s [error status=0x%x] found\n",
|
||||
err->msg, err->int_msk);
|
||||
err++;
|
||||
}
|
||||
}
|
||||
|
||||
static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre)
|
||||
{
|
||||
u32 err_sts;
|
||||
|
||||
/* read err sts */
|
||||
err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS);
|
||||
if (err_sts) {
|
||||
hpre_log_hw_error(hpre, err_sts);
|
||||
|
||||
/* clear error interrupts */
|
||||
writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT);
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
}
|
||||
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev)
|
||||
{
|
||||
struct hpre *hpre = pci_get_drvdata(pdev);
|
||||
pci_ers_result_t qm_ret, hpre_ret;
|
||||
|
||||
/* log qm error */
|
||||
qm_ret = hisi_qm_hw_error_handle(&hpre->qm);
|
||||
|
||||
/* log hpre error */
|
||||
hpre_ret = hpre_hw_error_handle(hpre);
|
||||
|
||||
return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
|
||||
hpre_ret == PCI_ERS_RESULT_NEED_RESET) ?
|
||||
PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
|
||||
pci_channel_state_t state)
|
||||
{
|
||||
pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
|
||||
if (state == pci_channel_io_perm_failure)
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
||||
return hpre_process_hw_error(pdev);
|
||||
}
|
||||
|
||||
static const struct pci_error_handlers hpre_err_handler = {
|
||||
.error_detected = hpre_error_detected,
|
||||
.error_detected = hisi_qm_dev_err_detected,
|
||||
};
|
||||
|
||||
static struct pci_driver hpre_pci_driver = {
|
||||
@@ -1013,6 +952,7 @@ static int __init hpre_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
hisi_qm_init_list(&hpre_devices);
|
||||
hpre_register_debugfs();
|
||||
|
||||
ret = pci_register_driver(&hpre_pci_driver);
|
||||
|
@@ -9,6 +9,9 @@
|
||||
#include <linux/log2.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uacce.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <uapi/misc/uacce/hisi_qm.h>
|
||||
#include "qm.h"
|
||||
|
||||
/* eq/aeq irq enable */
|
||||
@@ -269,6 +272,12 @@ struct qm_doorbell {
|
||||
__le16 priority;
|
||||
};
|
||||
|
||||
struct hisi_qm_resource {
|
||||
struct hisi_qm *qm;
|
||||
int distance;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct hisi_qm_hw_ops {
|
||||
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
|
||||
void (*qm_db)(struct hisi_qm *qm, u16 qn,
|
||||
@@ -277,6 +286,7 @@ struct hisi_qm_hw_ops {
|
||||
int (*debug_init)(struct hisi_qm *qm);
|
||||
void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
|
||||
u32 msi);
|
||||
void (*hw_error_uninit)(struct hisi_qm *qm);
|
||||
pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm);
|
||||
};
|
||||
|
||||
@@ -465,9 +475,14 @@ static void qm_cq_head_update(struct hisi_qp *qp)
|
||||
|
||||
static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
|
||||
{
|
||||
struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
|
||||
if (qp->event_cb) {
|
||||
qp->event_cb(qp);
|
||||
return;
|
||||
}
|
||||
|
||||
if (qp->req_cb) {
|
||||
struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
|
||||
|
||||
while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
|
||||
dma_rmb();
|
||||
qp->req_cb(qp, qp->sqe + qm->sqe_size *
|
||||
@@ -485,17 +500,9 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
|
||||
}
|
||||
}
|
||||
|
||||
static void qm_qp_work_func(struct work_struct *work)
|
||||
static void qm_work_process(struct work_struct *work)
|
||||
{
|
||||
struct hisi_qp *qp;
|
||||
|
||||
qp = container_of(work, struct hisi_qp, work);
|
||||
qm_poll_qp(qp, qp->qm);
|
||||
}
|
||||
|
||||
static irqreturn_t qm_irq_handler(int irq, void *data)
|
||||
{
|
||||
struct hisi_qm *qm = data;
|
||||
struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
|
||||
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
|
||||
struct hisi_qp *qp;
|
||||
int eqe_num = 0;
|
||||
@@ -504,7 +511,7 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
|
||||
eqe_num++;
|
||||
qp = qm_to_hisi_qp(qm, eqe);
|
||||
if (qp)
|
||||
queue_work(qp->wq, &qp->work);
|
||||
qm_poll_qp(qp, qm);
|
||||
|
||||
if (qm->status.eq_head == QM_Q_DEPTH - 1) {
|
||||
qm->status.eqc_phase = !qm->status.eqc_phase;
|
||||
@@ -522,6 +529,17 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
|
||||
}
|
||||
|
||||
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
|
||||
}
|
||||
|
||||
static irqreturn_t do_qm_irq(int irq, void *data)
|
||||
{
|
||||
struct hisi_qm *qm = (struct hisi_qm *)data;
|
||||
|
||||
/* the workqueue created by device driver of QM */
|
||||
if (qm->wq)
|
||||
queue_work(qm->wq, &qm->work);
|
||||
else
|
||||
schedule_work(&qm->work);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -531,7 +549,7 @@ static irqreturn_t qm_irq(int irq, void *data)
|
||||
struct hisi_qm *qm = data;
|
||||
|
||||
if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
|
||||
return qm_irq_handler(irq, data);
|
||||
return do_qm_irq(irq, data);
|
||||
|
||||
dev_err(&qm->pdev->dev, "invalid int source\n");
|
||||
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
|
||||
@@ -1011,43 +1029,45 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
|
||||
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
|
||||
}
|
||||
|
||||
static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
|
||||
{
|
||||
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
|
||||
}
|
||||
|
||||
static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
|
||||
{
|
||||
const struct hisi_qm_hw_error *err = qm_hw_error;
|
||||
const struct hisi_qm_hw_error *err;
|
||||
struct device *dev = &qm->pdev->dev;
|
||||
u32 reg_val, type, vf_num;
|
||||
int i;
|
||||
|
||||
while (err->msg) {
|
||||
if (err->int_msk & error_status) {
|
||||
dev_err(dev, "%s [error status=0x%x] found\n",
|
||||
err->msg, err->int_msk);
|
||||
for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
|
||||
err = &qm_hw_error[i];
|
||||
if (!(err->int_msk & error_status))
|
||||
continue;
|
||||
|
||||
if (error_status & QM_DB_TIMEOUT) {
|
||||
reg_val = readl(qm->io_base +
|
||||
QM_ABNORMAL_INF01);
|
||||
type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
|
||||
QM_DB_TIMEOUT_TYPE_SHIFT;
|
||||
vf_num = reg_val & QM_DB_TIMEOUT_VF;
|
||||
dev_err(dev, "qm %s doorbell timeout in function %u\n",
|
||||
qm_db_timeout[type], vf_num);
|
||||
}
|
||||
dev_err(dev, "%s [error status=0x%x] found\n",
|
||||
err->msg, err->int_msk);
|
||||
|
||||
if (error_status & QM_OF_FIFO_OF) {
|
||||
reg_val = readl(qm->io_base +
|
||||
QM_ABNORMAL_INF00);
|
||||
type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
|
||||
QM_FIFO_OVERFLOW_TYPE_SHIFT;
|
||||
vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
|
||||
if (err->int_msk & QM_DB_TIMEOUT) {
|
||||
reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
|
||||
type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
|
||||
QM_DB_TIMEOUT_TYPE_SHIFT;
|
||||
vf_num = reg_val & QM_DB_TIMEOUT_VF;
|
||||
dev_err(dev, "qm %s doorbell timeout in function %u\n",
|
||||
qm_db_timeout[type], vf_num);
|
||||
} else if (err->int_msk & QM_OF_FIFO_OF) {
|
||||
reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
|
||||
type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
|
||||
QM_FIFO_OVERFLOW_TYPE_SHIFT;
|
||||
vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
|
||||
|
||||
if (type < ARRAY_SIZE(qm_fifo_overflow))
|
||||
dev_err(dev, "qm %s fifo overflow in function %u\n",
|
||||
qm_fifo_overflow[type],
|
||||
vf_num);
|
||||
else
|
||||
dev_err(dev, "unknown error type\n");
|
||||
}
|
||||
if (type < ARRAY_SIZE(qm_fifo_overflow))
|
||||
dev_err(dev, "qm %s fifo overflow in function %u\n",
|
||||
qm_fifo_overflow[type], vf_num);
|
||||
else
|
||||
dev_err(dev, "unknown error type\n");
|
||||
}
|
||||
err++;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1082,6 +1102,7 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
|
||||
.qm_db = qm_db_v2,
|
||||
.get_irq_num = qm_get_irq_num_v2,
|
||||
.hw_error_init = qm_hw_error_init_v2,
|
||||
.hw_error_uninit = qm_hw_error_uninit_v2,
|
||||
.hw_error_handle = qm_hw_error_handle_v2,
|
||||
};
|
||||
|
||||
@@ -1147,20 +1168,9 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
|
||||
|
||||
qp->qp_id = qp_id;
|
||||
qp->alg_type = alg_type;
|
||||
INIT_WORK(&qp->work, qm_qp_work_func);
|
||||
qp->wq = alloc_workqueue("hisi_qm", WQ_UNBOUND | WQ_HIGHPRI |
|
||||
WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0);
|
||||
if (!qp->wq) {
|
||||
ret = -EFAULT;
|
||||
goto err_free_qp_mem;
|
||||
}
|
||||
|
||||
return qp;
|
||||
|
||||
err_free_qp_mem:
|
||||
if (qm->use_dma_api)
|
||||
dma_free_coherent(dev, qp->qdma.size, qp->qdma.va,
|
||||
qp->qdma.dma);
|
||||
err_clear_bit:
|
||||
write_lock(&qm->qps_lock);
|
||||
qm->qp_array[qp_id] = NULL;
|
||||
@@ -1269,7 +1279,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
|
||||
* @qp: The qp we want to start to run.
|
||||
* @arg: Accelerator specific argument.
|
||||
*
|
||||
* After this function, qp can receive request from user. Return qp_id if
|
||||
* After this function, qp can receive request from user. Return 0 if
|
||||
* successful, Return -EBUSY if failed.
|
||||
*/
|
||||
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
|
||||
@@ -1314,7 +1324,7 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
|
||||
|
||||
dev_dbg(dev, "queue %d started\n", qp_id);
|
||||
|
||||
return qp_id;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
|
||||
|
||||
@@ -1395,6 +1405,214 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
|
||||
}
|
||||
}
|
||||
|
||||
static void qm_qp_event_notifier(struct hisi_qp *qp)
|
||||
{
|
||||
wake_up_interruptible(&qp->uacce_q->wait);
|
||||
}
|
||||
|
||||
static int hisi_qm_get_available_instances(struct uacce_device *uacce)
|
||||
{
|
||||
int i, ret;
|
||||
struct hisi_qm *qm = uacce->priv;
|
||||
|
||||
read_lock(&qm->qps_lock);
|
||||
for (i = 0, ret = 0; i < qm->qp_num; i++)
|
||||
if (!qm->qp_array[i])
|
||||
ret++;
|
||||
read_unlock(&qm->qps_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
|
||||
unsigned long arg,
|
||||
struct uacce_queue *q)
|
||||
{
|
||||
struct hisi_qm *qm = uacce->priv;
|
||||
struct hisi_qp *qp;
|
||||
u8 alg_type = 0;
|
||||
|
||||
qp = hisi_qm_create_qp(qm, alg_type);
|
||||
if (IS_ERR(qp))
|
||||
return PTR_ERR(qp);
|
||||
|
||||
q->priv = qp;
|
||||
q->uacce = uacce;
|
||||
qp->uacce_q = q;
|
||||
qp->event_cb = qm_qp_event_notifier;
|
||||
qp->pasid = arg;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
|
||||
{
|
||||
struct hisi_qp *qp = q->priv;
|
||||
|
||||
hisi_qm_cache_wb(qp->qm);
|
||||
hisi_qm_release_qp(qp);
|
||||
}
|
||||
|
||||
/* map sq/cq/doorbell to user space */
|
||||
static int hisi_qm_uacce_mmap(struct uacce_queue *q,
|
||||
struct vm_area_struct *vma,
|
||||
struct uacce_qfile_region *qfr)
|
||||
{
|
||||
struct hisi_qp *qp = q->priv;
|
||||
struct hisi_qm *qm = qp->qm;
|
||||
size_t sz = vma->vm_end - vma->vm_start;
|
||||
struct pci_dev *pdev = qm->pdev;
|
||||
struct device *dev = &pdev->dev;
|
||||
unsigned long vm_pgoff;
|
||||
int ret;
|
||||
|
||||
switch (qfr->type) {
|
||||
case UACCE_QFRT_MMIO:
|
||||
if (qm->ver == QM_HW_V2) {
|
||||
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
|
||||
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
|
||||
return -EINVAL;
|
||||
} else {
|
||||
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vma->vm_flags |= VM_IO;
|
||||
|
||||
return remap_pfn_range(vma, vma->vm_start,
|
||||
qm->phys_base >> PAGE_SHIFT,
|
||||
sz, pgprot_noncached(vma->vm_page_prot));
|
||||
case UACCE_QFRT_DUS:
|
||||
if (sz != qp->qdma.size)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* dma_mmap_coherent() requires vm_pgoff as 0
|
||||
* restore vm_pfoff to initial value for mmap()
|
||||
*/
|
||||
vm_pgoff = vma->vm_pgoff;
|
||||
vma->vm_pgoff = 0;
|
||||
ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
|
||||
qp->qdma.dma, sz);
|
||||
vma->vm_pgoff = vm_pgoff;
|
||||
return ret;
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
|
||||
{
|
||||
struct hisi_qp *qp = q->priv;
|
||||
|
||||
return hisi_qm_start_qp(qp, qp->pasid);
|
||||
}
|
||||
|
||||
static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
|
||||
{
|
||||
hisi_qm_stop_qp(q->priv);
|
||||
}
|
||||
|
||||
static int qm_set_sqctype(struct uacce_queue *q, u16 type)
|
||||
{
|
||||
struct hisi_qm *qm = q->uacce->priv;
|
||||
struct hisi_qp *qp = q->priv;
|
||||
|
||||
write_lock(&qm->qps_lock);
|
||||
qp->alg_type = type;
|
||||
write_unlock(&qm->qps_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct hisi_qp *qp = q->priv;
|
||||
struct hisi_qp_ctx qp_ctx;
|
||||
|
||||
if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
|
||||
if (copy_from_user(&qp_ctx, (void __user *)arg,
|
||||
sizeof(struct hisi_qp_ctx)))
|
||||
return -EFAULT;
|
||||
|
||||
if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
|
||||
return -EINVAL;
|
||||
|
||||
qm_set_sqctype(q, qp_ctx.qc_type);
|
||||
qp_ctx.id = qp->qp_id;
|
||||
|
||||
if (copy_to_user((void __user *)arg, &qp_ctx,
|
||||
sizeof(struct hisi_qp_ctx)))
|
||||
return -EFAULT;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct uacce_ops uacce_qm_ops = {
|
||||
.get_available_instances = hisi_qm_get_available_instances,
|
||||
.get_queue = hisi_qm_uacce_get_queue,
|
||||
.put_queue = hisi_qm_uacce_put_queue,
|
||||
.start_queue = hisi_qm_uacce_start_queue,
|
||||
.stop_queue = hisi_qm_uacce_stop_queue,
|
||||
.mmap = hisi_qm_uacce_mmap,
|
||||
.ioctl = hisi_qm_uacce_ioctl,
|
||||
};
|
||||
|
||||
static int qm_alloc_uacce(struct hisi_qm *qm)
|
||||
{
|
||||
struct pci_dev *pdev = qm->pdev;
|
||||
struct uacce_device *uacce;
|
||||
unsigned long mmio_page_nr;
|
||||
unsigned long dus_page_nr;
|
||||
struct uacce_interface interface = {
|
||||
.flags = UACCE_DEV_SVA,
|
||||
.ops = &uacce_qm_ops,
|
||||
};
|
||||
|
||||
strncpy(interface.name, pdev->driver->name, sizeof(interface.name));
|
||||
|
||||
uacce = uacce_alloc(&pdev->dev, &interface);
|
||||
if (IS_ERR(uacce))
|
||||
return PTR_ERR(uacce);
|
||||
|
||||
if (uacce->flags & UACCE_DEV_SVA) {
|
||||
qm->use_sva = true;
|
||||
} else {
|
||||
/* only consider sva case */
|
||||
uacce_remove(uacce);
|
||||
qm->uacce = NULL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
uacce->is_vf = pdev->is_virtfn;
|
||||
uacce->priv = qm;
|
||||
uacce->algs = qm->algs;
|
||||
|
||||
if (qm->ver == QM_HW_V1) {
|
||||
mmio_page_nr = QM_DOORBELL_PAGE_NR;
|
||||
uacce->api_ver = HISI_QM_API_VER_BASE;
|
||||
} else {
|
||||
mmio_page_nr = QM_DOORBELL_PAGE_NR +
|
||||
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
|
||||
uacce->api_ver = HISI_QM_API_VER2_BASE;
|
||||
}
|
||||
|
||||
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
|
||||
sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
|
||||
|
||||
uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
|
||||
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
|
||||
|
||||
qm->uacce = uacce;
|
||||
|
||||
return 0;
|
||||
}
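For a feel of the sizes qm_alloc_uacce() ends up advertising (none of the constants are visible in this hunk, so the numbers are assumptions): with a 4 KB PAGE_SIZE, QM_Q_DEPTH = 1024, a 128-byte SQE and a 16-byte struct qm_cqe, the DUS region holds 1024 * 128 + 1024 * 16 = 147456 bytes, so dus_page_nr = (4095 + 147456) >> 12 = 36 pages, while the MMIO region is QM_DOORBELL_PAGE_NR pages on V1 hardware plus QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE extra pages on V2.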
|
||||
|
||||
/**
|
||||
* hisi_qm_get_free_qp_num() - Get free number of qp in qm.
|
||||
* @qm: The qm which want to get free qp.
|
||||
@@ -1437,10 +1655,14 @@ int hisi_qm_init(struct hisi_qm *qm)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = qm_alloc_uacce(qm);
|
||||
if (ret < 0)
|
||||
dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
|
||||
|
||||
ret = pci_enable_device_mem(pdev);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "Failed to enable device mem!\n");
|
||||
return ret;
|
||||
goto err_remove_uacce;
|
||||
}
|
||||
|
||||
ret = pci_request_mem_regions(pdev, qm->dev_name);
|
||||
@@ -1449,8 +1671,9 @@ int hisi_qm_init(struct hisi_qm *qm)
|
||||
goto err_disable_pcidev;
|
||||
}
|
||||
|
||||
qm->io_base = ioremap(pci_resource_start(pdev, PCI_BAR_2),
|
||||
pci_resource_len(qm->pdev, PCI_BAR_2));
|
||||
qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
|
||||
qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
|
||||
qm->io_base = ioremap(qm->phys_base, qm->phys_size);
|
||||
if (!qm->io_base) {
|
||||
ret = -EIO;
|
||||
goto err_release_mem_regions;
|
||||
@@ -1479,6 +1702,7 @@ int hisi_qm_init(struct hisi_qm *qm)
|
||||
qm->qp_in_used = 0;
|
||||
mutex_init(&qm->mailbox_lock);
|
||||
rwlock_init(&qm->qps_lock);
|
||||
INIT_WORK(&qm->work, qm_work_process);
|
||||
|
||||
dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf",
|
||||
qm->use_dma_api ? "dma api" : "iommu api");
|
||||
@@ -1493,6 +1717,9 @@ err_release_mem_regions:
|
||||
pci_release_mem_regions(pdev);
|
||||
err_disable_pcidev:
|
||||
pci_disable_device(pdev);
|
||||
err_remove_uacce:
|
||||
uacce_remove(qm->uacce);
|
||||
qm->uacce = NULL;
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1509,6 +1736,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
|
||||
struct pci_dev *pdev = qm->pdev;
|
||||
struct device *dev = &pdev->dev;
|
||||
|
||||
uacce_remove(qm->uacce);
|
||||
qm->uacce = NULL;
|
||||
|
||||
if (qm->use_dma_api && qm->qdma.va) {
|
||||
hisi_qm_cache_wb(qm);
|
||||
dma_free_coherent(dev, qm->qdma.size,
|
||||
@@ -1856,43 +2086,30 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
|
||||
|
||||
/**
|
||||
* hisi_qm_hw_error_init() - Configure qm hardware error report method.
|
||||
* @qm: The qm which we want to configure.
|
||||
* @ce: Bit mask of correctable error configure.
|
||||
* @nfe: Bit mask of non-fatal error configure.
|
||||
* @fe: Bit mask of fatal error configure.
|
||||
* @msi: Bit mask of error reported by message signal interrupt.
|
||||
*
|
||||
* Hardware errors of qm can be reported either by RAS interrupts which will
|
||||
* be handled by UEFI and then PCIe AER or by device MSI. User can configure
|
||||
* each error to use either of above two methods. For RAS interrupts, we can
|
||||
* configure an error as one of correctable error, non-fatal error or
|
||||
* fatal error.
|
||||
*
|
||||
* Bits indicating errors can be configured to ce, nfe, fe and msi to enable
|
||||
* related report methods. Error report will be masked if related error bit
|
||||
* does not configure.
|
||||
*/
|
||||
void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
|
||||
u32 msi)
|
||||
static void qm_hw_error_init(struct hisi_qm *qm)
|
||||
{
|
||||
const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
|
||||
|
||||
if (!qm->ops->hw_error_init) {
|
||||
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
qm->ops->hw_error_init(qm, ce, nfe, fe, msi);
|
||||
qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe,
|
||||
err_info->fe, err_info->msi);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
|
||||
|
||||
/**
|
||||
* hisi_qm_hw_error_handle() - Handle qm non-fatal hardware errors.
|
||||
* @qm: The qm which has non-fatal hardware errors.
|
||||
*
|
||||
* Accelerators use this function to handle qm non-fatal hardware errors.
|
||||
*/
|
||||
pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
|
||||
static void qm_hw_error_uninit(struct hisi_qm *qm)
|
||||
{
|
||||
if (!qm->ops->hw_error_uninit) {
|
||||
dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
qm->ops->hw_error_uninit(qm);
|
||||
}
|
||||
|
||||
static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm)
|
||||
{
|
||||
if (!qm->ops->hw_error_handle) {
|
||||
dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
|
||||
@@ -1901,7 +2118,6 @@ pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
|
||||
|
||||
return qm->ops->hw_error_handle(qm);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_hw_error_handle);
|
||||
|
||||
/**
|
||||
* hisi_qm_get_hw_version() - Get hardware version of a qm.
|
||||
@@ -1922,6 +2138,229 @@ enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
|
||||
|
||||
/**
|
||||
* hisi_qm_dev_err_init() - Initialize device error configuration.
|
||||
* @qm: The qm for which we want to do error initialization.
|
||||
*
|
||||
* Initialize QM and device error related configuration.
|
||||
*/
|
||||
void hisi_qm_dev_err_init(struct hisi_qm *qm)
|
||||
{
|
||||
if (qm->fun_type == QM_HW_VF)
|
||||
return;
|
||||
|
||||
qm_hw_error_init(qm);
|
||||
|
||||
if (!qm->err_ini->hw_err_enable) {
|
||||
dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
|
||||
return;
|
||||
}
|
||||
qm->err_ini->hw_err_enable(qm);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
|
||||
|
||||
/**
|
||||
* hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
|
||||
* @qm: The qm for which we want to do error uninitialization.
|
||||
*
|
||||
* Uninitialize QM and device error related configuration.
|
||||
*/
|
||||
void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
|
||||
{
|
||||
if (qm->fun_type == QM_HW_VF)
|
||||
return;
|
||||
|
||||
qm_hw_error_uninit(qm);
|
||||
|
||||
if (!qm->err_ini->hw_err_disable) {
|
||||
dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
|
||||
return;
|
||||
}
|
||||
qm->err_ini->hw_err_disable(qm);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
|
||||
|
||||
/**
|
||||
* hisi_qm_free_qps() - free multiple queue pairs.
|
||||
* @qps: The queue pairs need to be freed.
|
||||
* @qp_num: The num of queue pairs.
|
||||
*/
|
||||
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!qps || qp_num <= 0)
|
||||
return;
|
||||
|
||||
for (i = qp_num - 1; i >= 0; i--)
|
||||
hisi_qm_release_qp(qps[i]);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
|
||||
|
||||
static void free_list(struct list_head *head)
|
||||
{
|
||||
struct hisi_qm_resource *res, *tmp;
|
||||
|
||||
list_for_each_entry_safe(res, tmp, head, list) {
|
||||
list_del(&res->list);
|
||||
kfree(res);
|
||||
}
|
||||
}
|
||||
|
||||
static int hisi_qm_sort_devices(int node, struct list_head *head,
|
||||
struct hisi_qm_list *qm_list)
|
||||
{
|
||||
struct hisi_qm_resource *res, *tmp;
|
||||
struct hisi_qm *qm;
|
||||
struct list_head *n;
|
||||
struct device *dev;
|
||||
int dev_node = 0;
|
||||
|
||||
list_for_each_entry(qm, &qm_list->list, list) {
|
||||
dev = &qm->pdev->dev;
|
||||
|
||||
if (IS_ENABLED(CONFIG_NUMA)) {
|
||||
dev_node = dev_to_node(dev);
|
||||
if (dev_node < 0)
|
||||
dev_node = 0;
|
||||
}
|
||||
|
||||
res = kzalloc(sizeof(*res), GFP_KERNEL);
|
||||
if (!res)
|
||||
return -ENOMEM;
|
||||
|
||||
res->qm = qm;
|
||||
res->distance = node_distance(dev_node, node);
|
||||
n = head;
|
||||
list_for_each_entry(tmp, head, list) {
|
||||
if (res->distance < tmp->distance) {
|
||||
n = &tmp->list;
|
||||
break;
|
||||
}
|
||||
}
|
||||
list_add_tail(&res->list, n);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* hisi_qm_alloc_qps_node() - Create multiple queue pairs.
|
||||
* @qm_list: The list of all available devices.
|
||||
* @qp_num: The number of queue pairs need created.
|
||||
* @alg_type: The algorithm type.
|
||||
* @node: The numa node.
|
||||
* @qps: The queue pairs need created.
|
||||
*
|
||||
* This function will sort all available device according to numa distance.
|
||||
* Then try to create all queue pairs from one device, if all devices do
|
||||
* not meet the requirements will return error.
|
||||
*/
|
||||
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
|
||||
u8 alg_type, int node, struct hisi_qp **qps)
|
||||
{
|
||||
struct hisi_qm_resource *tmp;
|
||||
int ret = -ENODEV;
|
||||
LIST_HEAD(head);
|
||||
int i;
|
||||
|
||||
if (!qps || !qm_list || qp_num <= 0)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&qm_list->lock);
|
||||
if (hisi_qm_sort_devices(node, &head, qm_list)) {
|
||||
mutex_unlock(&qm_list->lock);
|
||||
goto err;
|
||||
}
|
||||
|
||||
list_for_each_entry(tmp, &head, list) {
|
||||
for (i = 0; i < qp_num; i++) {
|
||||
qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
|
||||
if (IS_ERR(qps[i])) {
|
||||
hisi_qm_free_qps(qps, i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i == qp_num) {
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&qm_list->lock);
|
||||
if (ret)
|
||||
pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
|
||||
node, alg_type, qp_num);
|
||||
|
||||
err:
|
||||
free_list(&head);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
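Taken together with hisi_qm_start_qp() and hisi_qm_free_qps(), the allocator above replaces the per-driver find_device/create_qp helpers that this series removes from hpre and sec. A minimal driver-side sketch of the intended usage follows; the names my_devices and my_create_started_qp are placeholders, not part of this patch:

static struct hisi_qm_list my_devices;	/* populated with hisi_qm_add_to_list() at probe time */

static struct hisi_qp *my_create_started_qp(u8 alg_type)
{
	int node = cpu_to_node(smp_processor_id());
	struct hisi_qp *qp = NULL;
	int ret;

	/* pick the NUMA-closest device that can still hand out one queue pair */
	ret = hisi_qm_alloc_qps_node(&my_devices, 1, alg_type, node, &qp);
	if (ret)
		return NULL;

	/* after this series hisi_qm_start_qp() returns 0 on success */
	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		return NULL;
	}

	return qp;
}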
|
||||
|
||||
static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm)
|
||||
{
|
||||
u32 err_sts;
|
||||
|
||||
if (!qm->err_ini->get_dev_hw_err_status) {
|
||||
dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
|
||||
return PCI_ERS_RESULT_NONE;
|
||||
}
|
||||
|
||||
/* get device hardware error status */
|
||||
err_sts = qm->err_ini->get_dev_hw_err_status(qm);
|
||||
if (err_sts) {
|
||||
if (!qm->err_ini->log_dev_hw_err) {
|
||||
dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
}
|
||||
|
||||
qm->err_ini->log_dev_hw_err(qm, err_sts);
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
}
|
||||
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
|
||||
{
|
||||
struct hisi_qm *qm = pci_get_drvdata(pdev);
|
||||
pci_ers_result_t qm_ret, dev_ret;
|
||||
|
||||
/* log qm error */
|
||||
qm_ret = qm_hw_error_handle(qm);
|
||||
|
||||
/* log device error */
|
||||
dev_ret = qm_dev_err_handle(qm);
|
||||
|
||||
return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
|
||||
dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
|
||||
PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
/**
|
||||
* hisi_qm_dev_err_detected() - Get device and qm error status then log it.
|
||||
* @pdev: The PCI device which need report error.
|
||||
* @state: The connectivity between CPU and device.
|
||||
*
|
||||
* We register this function into PCIe AER handlers, It will report device or
|
||||
* qm hardware error status when error occur.
|
||||
*/
|
||||
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
|
||||
pci_channel_state_t state)
|
||||
{
|
||||
if (pdev->is_virtfn)
|
||||
return PCI_ERS_RESULT_NONE;
|
||||
|
||||
pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
|
||||
if (state == pci_channel_io_perm_failure)
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
||||
return qm_process_dev_error(pdev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
|
||||
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
|
||||
|
@@ -77,6 +77,9 @@
|
||||
|
||||
#define HISI_ACC_SGL_SGE_NR_MAX 255
|
||||
|
||||
/* page number for queue file region */
|
||||
#define QM_DOORBELL_PAGE_NR 1
|
||||
|
||||
enum qp_state {
|
||||
QP_STOP,
|
||||
};
|
||||
@@ -125,6 +128,28 @@ struct hisi_qm_status {
|
||||
unsigned long flags;
|
||||
};
|
||||
|
||||
struct hisi_qm;
|
||||
|
||||
struct hisi_qm_err_info {
|
||||
u32 ce;
|
||||
u32 nfe;
|
||||
u32 fe;
|
||||
u32 msi;
|
||||
};
|
||||
|
||||
struct hisi_qm_err_ini {
|
||||
void (*hw_err_enable)(struct hisi_qm *qm);
|
||||
void (*hw_err_disable)(struct hisi_qm *qm);
|
||||
u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
|
||||
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
|
||||
struct hisi_qm_err_info err_info;
|
||||
};
|
||||
|
||||
struct hisi_qm_list {
|
||||
struct mutex lock;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct hisi_qm {
|
||||
enum qm_hw_ver ver;
|
||||
enum qm_fun_type fun_type;
|
||||
@@ -136,6 +161,7 @@ struct hisi_qm {
|
||||
u32 qp_num;
|
||||
u32 qp_in_used;
|
||||
u32 ctrl_qp_num;
|
||||
struct list_head list;
|
||||
|
||||
struct qm_dma qdma;
|
||||
struct qm_sqc *sqc;
|
||||
@@ -148,6 +174,7 @@ struct hisi_qm {
|
||||
dma_addr_t aeqe_dma;
|
||||
|
||||
struct hisi_qm_status status;
|
||||
const struct hisi_qm_err_ini *err_ini;
|
||||
|
||||
rwlock_t qps_lock;
|
||||
unsigned long *qp_bitmap;
|
||||
@@ -162,7 +189,15 @@ struct hisi_qm {
|
||||
u32 error_mask;
|
||||
u32 msi_mask;
|
||||
|
||||
struct workqueue_struct *wq;
|
||||
struct work_struct work;
|
||||
|
||||
const char *algs;
|
||||
bool use_dma_api;
|
||||
bool use_sva;
|
||||
resource_size_t phys_base;
|
||||
resource_size_t phys_size;
|
||||
struct uacce_device *uacce;
|
||||
};
|
||||
|
||||
struct hisi_qp_status {
|
||||
@@ -192,12 +227,35 @@ struct hisi_qp {
|
||||
struct hisi_qp_ops *hw_ops;
|
||||
void *qp_ctx;
|
||||
void (*req_cb)(struct hisi_qp *qp, void *data);
|
||||
struct work_struct work;
|
||||
struct workqueue_struct *wq;
|
||||
void (*event_cb)(struct hisi_qp *qp);
|
||||
|
||||
struct hisi_qm *qm;
|
||||
u16 pasid;
|
||||
struct uacce_queue *uacce_q;
|
||||
};
|
||||
|
||||
static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
|
||||
{
|
||||
INIT_LIST_HEAD(&qm_list->list);
|
||||
mutex_init(&qm_list->lock);
|
||||
}
|
||||
|
||||
static inline void hisi_qm_add_to_list(struct hisi_qm *qm,
|
||||
struct hisi_qm_list *qm_list)
|
||||
{
|
||||
mutex_lock(&qm_list->lock);
|
||||
list_add_tail(&qm->list, &qm_list->list);
|
||||
mutex_unlock(&qm_list->lock);
|
||||
}
|
||||
|
||||
static inline void hisi_qm_del_from_list(struct hisi_qm *qm,
|
||||
struct hisi_qm_list *qm_list)
|
||||
{
|
||||
mutex_lock(&qm_list->lock);
|
||||
list_del(&qm->list);
|
||||
mutex_unlock(&qm_list->lock);
|
||||
}
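The three inline helpers above cover the whole life cycle of an entry on a hisi_qm_list. A condensed sketch of how a driver is expected to wire them up, modelled on the hpre conversion in this series and using placeholder names (my_devices, my_probe_tail, my_register_algs):

static struct hisi_qm_list my_devices;	/* hisi_qm_init_list(&my_devices) runs in module_init() */

static int my_probe_tail(struct hisi_qm *qm)
{
	int ret;

	hisi_qm_add_to_list(qm, &my_devices);

	ret = my_register_algs();	/* placeholder for the crypto registration step */
	if (ret) {
		hisi_qm_del_from_list(qm, &my_devices);
		return ret;
	}

	return 0;
}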
|
||||
|
||||
int hisi_qm_init(struct hisi_qm *qm);
|
||||
void hisi_qm_uninit(struct hisi_qm *qm);
|
||||
int hisi_qm_start(struct hisi_qm *qm);
|
||||
@@ -211,11 +269,12 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
|
||||
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
|
||||
int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
|
||||
int hisi_qm_debug_init(struct hisi_qm *qm);
|
||||
void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
|
||||
u32 msi);
|
||||
pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm);
|
||||
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
|
||||
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
|
||||
void hisi_qm_dev_err_init(struct hisi_qm *qm);
|
||||
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
|
||||
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
|
||||
pci_channel_state_t state);
|
||||
|
||||
struct hisi_acc_sgl_pool;
|
||||
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
|
||||
@@ -227,4 +286,7 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
|
||||
u32 count, u32 sge_nr);
|
||||
void hisi_acc_free_sgl_pool(struct device *dev,
|
||||
struct hisi_acc_sgl_pool *pool);
|
||||
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
|
||||
u8 alg_type, int node, struct hisi_qp **qps);
|
||||
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
|
||||
#endif
|
||||
|
@@ -11,6 +11,8 @@
|
||||
|
||||
/* Algorithm resource per hardware SEC queue */
|
||||
struct sec_alg_res {
|
||||
u8 *pbuf;
|
||||
dma_addr_t pbuf_dma;
|
||||
u8 *c_ivin;
|
||||
dma_addr_t c_ivin_dma;
|
||||
u8 *out_mac;
|
||||
@@ -23,6 +25,8 @@ struct sec_cipher_req {
|
||||
dma_addr_t c_in_dma;
|
||||
struct hisi_acc_hw_sgl *c_out;
|
||||
dma_addr_t c_out_dma;
|
||||
u8 *c_ivin;
|
||||
dma_addr_t c_ivin_dma;
|
||||
struct skcipher_request *sk_req;
|
||||
u32 c_len;
|
||||
bool encrypt;
|
||||
@@ -48,6 +52,7 @@ struct sec_req {
|
||||
|
||||
/* Status of the SEC request */
|
||||
bool fake_busy;
|
||||
bool use_pbuf;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -114,6 +119,7 @@ struct sec_ctx {
|
||||
struct sec_qp_ctx *qp_ctx;
|
||||
struct sec_dev *sec;
|
||||
const struct sec_req_op *req_op;
|
||||
struct hisi_qp **qps;
|
||||
|
||||
/* Half queues for encipher, and half for decipher */
|
||||
u32 hlf_q_num;
|
||||
@@ -128,6 +134,7 @@ struct sec_ctx {
|
||||
atomic_t dec_qcyclic;
|
||||
|
||||
enum sec_alg_type alg_type;
|
||||
bool pbuf_supported;
|
||||
struct sec_cipher_ctx c_ctx;
|
||||
struct sec_auth_ctx a_ctx;
|
||||
};
|
||||
@@ -162,14 +169,15 @@ struct sec_debug {
|
||||
|
||||
struct sec_dev {
|
||||
struct hisi_qm qm;
|
||||
struct list_head list;
|
||||
struct sec_debug debug;
|
||||
u32 ctx_q_num;
|
||||
bool iommu_used;
|
||||
u32 num_vfs;
|
||||
unsigned long status;
|
||||
};
|
||||
|
||||
struct sec_dev *sec_find_device(int node);
|
||||
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
|
||||
struct hisi_qp **sec_create_qps(void);
|
||||
int sec_register_to_crypto(void);
|
||||
void sec_unregister_from_crypto(void);
|
||||
#endif
|
||||
|
@@ -46,7 +46,21 @@
|
||||
#define SEC_CIPHER_AUTH 0xfe
|
||||
#define SEC_AUTH_CIPHER 0x1
|
||||
#define SEC_MAX_MAC_LEN 64
|
||||
#define SEC_MAX_AAD_LEN 65535
|
||||
#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
|
||||
|
||||
#define SEC_PBUF_SZ 512
|
||||
#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
|
||||
#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
|
||||
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
|
||||
SEC_MAX_MAC_LEN * 2)
|
||||
#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
|
||||
#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
|
||||
#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
|
||||
SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
|
||||
#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
|
||||
SEC_PBUF_LEFT_SZ)
|
||||
|
||||
#define SEC_SQE_LEN_RATE 4
|
||||
#define SEC_SQE_CFLAG 2
|
||||
#define SEC_SQE_AEAD_FLAG 3
|
||||
@@ -110,12 +124,12 @@ static void sec_free_req_id(struct sec_req *req)
|
||||
mutex_unlock(&qp_ctx->req_lock);
|
||||
}
|
||||
|
||||
static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
|
||||
static int sec_aead_verify(struct sec_req *req)
|
||||
{
|
||||
struct aead_request *aead_req = req->aead_req.aead_req;
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
|
||||
u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
|
||||
size_t authsize = crypto_aead_authsize(tfm);
|
||||
u8 *mac_out = req->aead_req.out_mac;
|
||||
u8 *mac = mac_out + SEC_MAX_MAC_LEN;
|
||||
struct scatterlist *sgl = aead_req->src;
|
||||
size_t sz;
|
||||
@@ -163,7 +177,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
|
||||
}
|
||||
|
||||
if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
|
||||
err = sec_aead_verify(req, qp_ctx);
|
||||
err = sec_aead_verify(req);
|
||||
|
||||
atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
|
||||
|
||||
@@ -245,6 +259,50 @@ static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
|
||||
res->out_mac, res->out_mac_dma);
|
||||
}
|
||||
|
||||
static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
|
||||
{
|
||||
if (res->pbuf)
|
||||
dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
|
||||
res->pbuf, res->pbuf_dma);
|
||||
}
|
||||
|
||||
/*
|
||||
* To improve performance, pbuffer is used for
|
||||
* small packets (< 512Bytes) as IOMMU translation using.
|
||||
*/
|
||||
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
|
||||
{
|
||||
int pbuf_page_offset;
|
||||
int i, j, k;
|
||||
|
||||
res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
|
||||
&res->pbuf_dma, GFP_KERNEL);
|
||||
if (!res->pbuf)
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* SEC_PBUF_PKG contains data pbuf, iv and
|
||||
* out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
|
||||
* Every PAGE contains six SEC_PBUF_PKG
|
||||
* The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG
|
||||
* So we need SEC_PBUF_PAGE_NUM numbers of PAGE
|
||||
* for the SEC_TOTAL_PBUF_SZ
|
||||
*/
|
||||
for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
|
||||
pbuf_page_offset = PAGE_SIZE * i;
|
||||
for (j = 0; j < SEC_PBUF_NUM; j++) {
|
||||
k = i * SEC_PBUF_NUM + j;
|
||||
if (k == QM_Q_DEPTH)
|
||||
break;
|
||||
res[k].pbuf = res->pbuf +
|
||||
j * SEC_PBUF_PKG + pbuf_page_offset;
|
||||
res[k].pbuf_dma = res->pbuf_dma +
|
||||
j * SEC_PBUF_PKG + pbuf_page_offset;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
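As a worked example of the layout the loop above builds (SEC_IV_SIZE and QM_Q_DEPTH are not visible in this hunk, so these numbers are assumptions): with SEC_IV_SIZE = 24 and SEC_MAX_MAC_LEN = 64, SEC_PBUF_PKG = 512 + 24 + 128 = 664 bytes, so a 4 KB page holds SEC_PBUF_NUM = 4096 / 664 = 6 packages; with QM_Q_DEPTH = 1024 that gives SEC_PBUF_PAGE_NUM = 1024 / 6 = 170 full pages plus 4 leftover packages (SEC_PBUF_LEFT_SZ = 2656 bytes), for SEC_TOTAL_PBUF_SZ = 170 * 4096 + 2656 = 698976 bytes per queue, which is also why the loop runs up to and including SEC_PBUF_PAGE_NUM to place the leftover packages on the final partial page.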
|
||||
|
||||
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
|
||||
struct sec_qp_ctx *qp_ctx)
|
||||
{
|
||||
@@ -259,11 +317,18 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
|
||||
if (ctx->alg_type == SEC_AEAD) {
|
||||
ret = sec_alloc_mac_resource(dev, res);
|
||||
if (ret)
|
||||
goto get_fail;
|
||||
goto alloc_fail;
|
||||
}
|
||||
if (ctx->pbuf_supported) {
|
||||
ret = sec_alloc_pbuf_resource(dev, res);
|
||||
if (ret) {
|
||||
dev_err(dev, "fail to alloc pbuf dma resource!\n");
|
||||
goto alloc_fail;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
get_fail:
|
||||
alloc_fail:
|
||||
sec_free_civ_resource(dev, res);
|
||||
|
||||
return ret;
|
||||
@@ -276,6 +341,8 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
|
||||
|
||||
sec_free_civ_resource(dev, qp_ctx->res);
|
||||
|
||||
if (ctx->pbuf_supported)
|
||||
sec_free_pbuf_resource(dev, qp_ctx->res);
|
||||
if (ctx->alg_type == SEC_AEAD)
|
||||
sec_free_mac_resource(dev, qp_ctx->res);
|
||||
}
|
||||
@@ -288,11 +355,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
|
||||
struct hisi_qp *qp;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
qp = hisi_qm_create_qp(qm, alg_type);
|
||||
if (IS_ERR(qp))
|
||||
return PTR_ERR(qp);
|
||||
|
||||
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
|
||||
qp = ctx->qps[qp_ctx_id];
|
||||
qp->req_type = 0;
|
||||
qp->qp_ctx = qp_ctx;
|
||||
qp->req_cb = sec_req_cb;
|
||||
@@ -335,7 +399,6 @@ err_free_c_in_pool:
|
||||
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
|
||||
err_destroy_idr:
|
||||
idr_destroy(&qp_ctx->req_idr);
|
||||
hisi_qm_release_qp(qp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -352,7 +415,6 @@ static void sec_release_qp_ctx(struct sec_ctx *ctx,
|
||||
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
|
||||
|
||||
idr_destroy(&qp_ctx->req_idr);
|
||||
hisi_qm_release_qp(qp_ctx->qp);
|
||||
}
|
||||
|
||||
static int sec_ctx_base_init(struct sec_ctx *ctx)
|
||||
@@ -360,14 +422,18 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
|
||||
struct sec_dev *sec;
|
||||
int i, ret;
|
||||
|
||||
sec = sec_find_device(cpu_to_node(smp_processor_id()));
|
||||
if (!sec) {
|
||||
pr_err("Can not find proper Hisilicon SEC device!\n");
|
||||
ctx->qps = sec_create_qps();
|
||||
if (!ctx->qps) {
|
||||
pr_err("Can not create sec qps!\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
|
||||
ctx->sec = sec;
|
||||
ctx->hlf_q_num = sec->ctx_q_num >> 1;
|
||||
|
||||
ctx->pbuf_supported = ctx->sec->iommu_used;
|
||||
|
||||
/* Half of queue depth is taken as fake requests limit in the queue. */
|
||||
ctx->fake_req_limit = QM_Q_DEPTH >> 1;
|
||||
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
|
||||
@@ -386,6 +452,7 @@ err_sec_release_qp_ctx:
|
||||
for (i = i - 1; i >= 0; i--)
|
||||
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
|
||||
|
||||
sec_destroy_qps(ctx->qps, sec->ctx_q_num);
|
||||
kfree(ctx->qp_ctx);
|
||||
return ret;
|
||||
}
|
||||
@@ -397,6 +464,7 @@ static void sec_ctx_base_uninit(struct sec_ctx *ctx)
|
||||
for (i = 0; i < ctx->sec->ctx_q_num; i++)
|
||||
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
|
||||
|
||||
sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
|
||||
kfree(ctx->qp_ctx);
|
||||
}
|
||||
|
||||
@@ -447,7 +515,6 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
|
||||
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int ret;
|
||||
|
||||
ctx = crypto_skcipher_ctx(tfm);
|
||||
ctx->alg_type = SEC_SKCIPHER;
|
||||
crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
|
||||
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
|
||||
@@ -591,11 +658,94 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
|
||||
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
|
||||
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
|
||||
|
||||
static int sec_cipher_map(struct device *dev, struct sec_req *req,
|
||||
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
|
||||
struct scatterlist *src)
|
||||
{
|
||||
struct aead_request *aead_req = req->aead_req.aead_req;
|
||||
struct sec_cipher_req *c_req = &req->c_req;
|
||||
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
|
||||
struct device *dev = SEC_CTX_DEV(ctx);
|
||||
int copy_size, pbuf_length;
|
||||
int req_id = req->req_id;
|
||||
|
||||
if (ctx->alg_type == SEC_AEAD)
|
||||
copy_size = aead_req->cryptlen + aead_req->assoclen;
|
||||
else
|
||||
copy_size = c_req->c_len;
|
||||
|
||||
pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
|
||||
qp_ctx->res[req_id].pbuf,
|
||||
copy_size);
|
||||
|
||||
if (unlikely(pbuf_length != copy_size)) {
|
||||
dev_err(dev, "copy src data to pbuf error!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
|
||||
|
||||
if (!c_req->c_in_dma) {
|
||||
dev_err(dev, "fail to set pbuffer address!\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
c_req->c_out_dma = c_req->c_in_dma;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
|
||||
struct scatterlist *dst)
|
||||
{
|
||||
struct aead_request *aead_req = req->aead_req.aead_req;
|
||||
struct sec_cipher_req *c_req = &req->c_req;
|
||||
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
|
||||
struct device *dev = SEC_CTX_DEV(ctx);
|
||||
int copy_size, pbuf_length;
|
||||
int req_id = req->req_id;
|
||||
|
||||
if (ctx->alg_type == SEC_AEAD)
|
||||
copy_size = c_req->c_len + aead_req->assoclen;
|
||||
else
|
||||
copy_size = c_req->c_len;
|
||||
|
||||
pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
|
||||
qp_ctx->res[req_id].pbuf,
|
||||
copy_size);
|
||||
|
||||
if (unlikely(pbuf_length != copy_size))
|
||||
dev_err(dev, "copy pbuf data to dst error!\n");
|
||||
|
||||
}
|
||||
|
||||
static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
|
||||
struct scatterlist *src, struct scatterlist *dst)
|
||||
{
|
||||
struct sec_cipher_req *c_req = &req->c_req;
|
||||
struct sec_aead_req *a_req = &req->aead_req;
|
||||
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
|
||||
struct sec_alg_res *res = &qp_ctx->res[req->req_id];
|
||||
struct device *dev = SEC_CTX_DEV(ctx);
|
||||
int ret;
|
||||
|
||||
if (req->use_pbuf) {
|
||||
ret = sec_cipher_pbuf_map(ctx, req, src);
|
||||
c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
|
||||
c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
|
||||
if (ctx->alg_type == SEC_AEAD) {
|
||||
a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
|
||||
a_req->out_mac_dma = res->pbuf_dma +
|
||||
SEC_PBUF_MAC_OFFSET;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
c_req->c_ivin = res->c_ivin;
|
||||
c_req->c_ivin_dma = res->c_ivin_dma;
|
||||
if (ctx->alg_type == SEC_AEAD) {
|
||||
a_req->out_mac = res->out_mac;
|
||||
a_req->out_mac_dma = res->out_mac_dma;
|
||||
}
|
||||
|
||||
c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
|
||||
qp_ctx->c_in_pool,
|
||||
@@ -626,29 +776,34 @@ static int sec_cipher_map(struct device *dev, struct sec_req *req,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
|
||||
static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
|
||||
struct scatterlist *src, struct scatterlist *dst)
|
||||
{
|
||||
if (dst != src)
|
||||
hisi_acc_sg_buf_unmap(dev, src, req->c_in);
|
||||
struct sec_cipher_req *c_req = &req->c_req;
|
||||
struct device *dev = SEC_CTX_DEV(ctx);
|
||||
|
||||
hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
|
||||
if (req->use_pbuf) {
|
||||
sec_cipher_pbuf_unmap(ctx, req, dst);
|
||||
} else {
|
||||
if (dst != src)
|
||||
hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
|
||||
|
||||
hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
|
||||
}
|
||||
}
|
||||
|
||||
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
|
||||
{
|
||||
struct skcipher_request *sq = req->c_req.sk_req;
|
||||
|
||||
return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
|
||||
return sec_cipher_map(ctx, req, sq->src, sq->dst);
|
||||
}
|
||||
|
||||
static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
|
||||
{
|
||||
struct device *dev = SEC_CTX_DEV(ctx);
|
||||
struct sec_cipher_req *c_req = &req->c_req;
|
||||
struct skcipher_request *sk_req = c_req->sk_req;
|
||||
struct skcipher_request *sq = req->c_req.sk_req;
|
||||
|
||||
sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
|
||||
sec_cipher_unmap(ctx, req, sq->src, sq->dst);
|
||||
}
|
||||
|
||||
static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
|
||||
@@ -759,16 +914,14 @@ static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
|
||||
{
|
||||
struct aead_request *aq = req->aead_req.aead_req;
|
||||
|
||||
return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
|
||||
return sec_cipher_map(ctx, req, aq->src, aq->dst);
|
||||
}
|
||||
|
||||
static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
|
||||
{
|
||||
struct device *dev = SEC_CTX_DEV(ctx);
|
||||
struct sec_cipher_req *cq = &req->c_req;
|
||||
struct aead_request *aq = req->aead_req.aead_req;
|
||||
|
||||
sec_cipher_unmap(dev, cq, aq->src, aq->dst);
|
||||
sec_cipher_unmap(ctx, req, aq->src, aq->dst);
|
||||
}
|
||||
|
||||
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
|
||||
@@ -801,9 +954,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
|
||||
static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
|
||||
{
|
||||
struct skcipher_request *sk_req = req->c_req.sk_req;
|
||||
u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
|
||||
struct sec_cipher_req *c_req = &req->c_req;
|
||||
|
||||
memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
|
||||
memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
|
||||
}
|
||||
|
||||
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
|
||||
@ -818,8 +971,7 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
|
||||
memset(sec_sqe, 0, sizeof(struct sec_sqe));
|
||||
|
||||
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
|
||||
sec_sqe->type2.c_ivin_addr =
|
||||
cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
|
||||
sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
|
||||
sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
|
||||
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
|
||||
|
||||
@@ -836,7 +988,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
|
||||
cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
|
||||
sec_sqe->type_cipher_auth = bd_type | cipher;
|
||||
|
||||
sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
|
||||
if (req->use_pbuf)
|
||||
sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
|
||||
else
|
||||
sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
|
||||
scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
|
||||
if (c_req->c_in_dma != c_req->c_out_dma)
|
||||
de = 0x1 << SEC_DE_OFFSET;
|
||||
@@ -844,7 +999,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
|
||||
sec_sqe->sds_sa_type = (de | scene | sa_type);
|
||||
|
||||
/* Just set DST address type */
|
||||
da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
|
||||
if (req->use_pbuf)
|
||||
da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
|
||||
else
|
||||
da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
|
||||
sec_sqe->sdm_addr_type |= da_type;
|
||||
|
||||
sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
|
||||
@@ -904,9 +1062,9 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
|
||||
static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
|
||||
{
|
||||
struct aead_request *aead_req = req->aead_req.aead_req;
|
||||
u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
|
||||
struct sec_cipher_req *c_req = &req->c_req;
|
||||
|
||||
memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
|
||||
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
|
||||
}
|
||||
|
||||
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
|
||||
@@ -939,8 +1097,7 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
|
||||
|
||||
sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
|
||||
|
||||
sec_sqe->type2.mac_addr =
|
||||
cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
|
||||
sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
|
||||
}
|
||||
|
||||
static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
|
||||
@@ -964,6 +1121,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
|
||||
{
|
||||
struct aead_request *a_req = req->aead_req.aead_req;
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
|
||||
struct sec_aead_req *aead_req = &req->aead_req;
|
||||
struct sec_cipher_req *c_req = &req->c_req;
|
||||
size_t authsize = crypto_aead_authsize(tfm);
|
||||
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
|
||||
@@ -979,7 +1137,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
|
||||
struct scatterlist *sgl = a_req->dst;
|
||||
|
||||
sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
|
||||
qp_ctx->res[req->req_id].out_mac,
|
||||
aead_req->out_mac,
|
||||
authsize, a_req->cryptlen +
|
||||
a_req->assoclen);
|
||||
|
||||
@@ -1031,6 +1189,7 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
|
||||
|
||||
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
|
||||
{
|
||||
struct sec_cipher_req *c_req = &req->c_req;
|
||||
int ret;
|
||||
|
||||
ret = sec_request_init(ctx, req);
|
||||
@ -1057,12 +1216,10 @@ err_send_req:
|
||||
/* As failing, restore the IV from user */
|
||||
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
|
||||
if (ctx->alg_type == SEC_SKCIPHER)
|
||||
memcpy(req->c_req.sk_req->iv,
|
||||
req->qp_ctx->res[req->req_id].c_ivin,
|
||||
memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
|
||||
ctx->c_ctx.ivsize);
|
||||
else
|
||||
memcpy(req->aead_req.aead_req->iv,
|
||||
req->qp_ctx->res[req->req_id].c_ivin,
|
||||
memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
|
||||
ctx->c_ctx.ivsize);
|
||||
}
|
||||
|
||||
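Not part of the diff: a hedged sketch of the save/restore idea behind the hunk above — the caller-supplied IV is copied into a per-request buffer (c_ivin here) before the request is queued, so a failed CBC decrypt submission can hand the original IV back untouched. The helper and callback below are illustrative only, not the driver's real submit path.

#include <linux/string.h>
#include <crypto/skcipher.h>

/*
 * Illustrative only: snapshot the caller's IV before submitting, and put
 * it back if the enqueue step fails, so the request keeps its original IV.
 */
static int submit_with_iv_backup(struct skcipher_request *req, u8 *ivin,
				 unsigned int ivsize,
				 int (*enqueue)(struct skcipher_request *))
{
	int ret;

	memcpy(ivin, req->iv, ivsize);		/* keep a private copy */

	ret = enqueue(req);			/* queue/hardware specific */
	if (ret)
		memcpy(req->iv, ivin, ivsize);	/* failed: restore user IV */

	return ret;
}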
@@ -1208,6 +1365,12 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 		return -EINVAL;
 	}
 	sreq->c_req.c_len = sk_req->cryptlen;
+
+	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
+		sreq->use_pbuf = true;
+	else
+		sreq->use_pbuf = false;
+
 	if (c_alg == SEC_CALG_3DES) {
 		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
 			dev_err(dev, "skcipher 3des input length error!\n");
@@ -1321,11 +1484,18 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	size_t authsize = crypto_aead_authsize(tfm);
 
-	if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
+	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
+		req->assoclen > SEC_MAX_AAD_LEN)) {
 		dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
 		return -EINVAL;
 	}
 
+	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
+		SEC_PBUF_SZ)
+		sreq->use_pbuf = true;
+	else
+		sreq->use_pbuf = false;
+
 	/* Support AES only */
 	if (unlikely(c_alg != SEC_CALG_AES)) {
 		dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
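Not part of the diff: a hedged sketch of the decision the two param-check hunks above make — requests whose whole payload (data plus AAD) fits under the pbuf threshold are bounced through a pre-mapped per-queue buffer instead of building a scatter-gather list, which avoids per-request SGL mapping overhead for small packets. SEC_PBUF_SZ is passed in as a plain parameter here; the helper is illustrative.

#include <linux/types.h>

/* Illustrative only: choose the pre-mapped bounce buffer for small requests. */
static bool want_pbuf(bool pbuf_supported, unsigned int cryptlen,
		      unsigned int assoclen, unsigned int pbuf_sz)
{
	/* The whole payload (data + AAD) must fit in the pre-mapped buffer. */
	return pbuf_supported && (cryptlen + assoclen) <= pbuf_sz;
}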
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/iommu.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -89,8 +90,7 @@ struct sec_hw_error {
 
 static const char sec_name[] = "hisi_sec2";
 static struct dentry *sec_debugfs_root;
-static LIST_HEAD(sec_list);
-static DEFINE_MUTEX(sec_list_lock);
+static struct hisi_qm_list sec_devices;
 
 static const struct sec_hw_error sec_hw_errors[] = {
 	{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
@@ -105,37 +105,6 @@ static const struct sec_hw_error sec_hw_errors[] = {
 	{ /* sentinel */ }
 };
 
-struct sec_dev *sec_find_device(int node)
-{
-#define SEC_NUMA_MAX_DISTANCE	100
-	int min_distance = SEC_NUMA_MAX_DISTANCE;
-	int dev_node = 0, free_qp_num = 0;
-	struct sec_dev *sec, *ret = NULL;
-	struct hisi_qm *qm;
-	struct device *dev;
-
-	mutex_lock(&sec_list_lock);
-	list_for_each_entry(sec, &sec_list, list) {
-		qm = &sec->qm;
-		dev = &qm->pdev->dev;
-#ifdef CONFIG_NUMA
-		dev_node = dev->numa_node;
-		if (dev_node < 0)
-			dev_node = 0;
-#endif
-		if (node_distance(dev_node, node) < min_distance) {
-			free_qp_num = hisi_qm_get_free_qp_num(qm);
-			if (free_qp_num >= sec->ctx_q_num) {
-				ret = sec;
-				min_distance = node_distance(dev_node, node);
-			}
-		}
-	}
-	mutex_unlock(&sec_list_lock);
-
-	return ret;
-}
-
 static const char * const sec_dbg_file_name[] = {
 	[SEC_CURRENT_QM] = "current_qm",
 	[SEC_CLEAR_ENABLE] = "clear_enable",
@ -238,6 +207,32 @@ static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
|
||||
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
|
||||
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
|
||||
|
||||
void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
|
||||
{
|
||||
hisi_qm_free_qps(qps, qp_num);
|
||||
kfree(qps);
|
||||
}
|
||||
|
||||
struct hisi_qp **sec_create_qps(void)
|
||||
{
|
||||
int node = cpu_to_node(smp_processor_id());
|
||||
u32 ctx_num = ctx_q_num;
|
||||
struct hisi_qp **qps;
|
||||
int ret;
|
||||
|
||||
qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
|
||||
if (!qps)
|
||||
return NULL;
|
||||
|
||||
ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
|
||||
if (!ret)
|
||||
return qps;
|
||||
|
||||
kfree(qps);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
static const struct pci_device_id sec_dev_ids[] = {
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
|
||||
@ -245,20 +240,6 @@ static const struct pci_device_id sec_dev_ids[] = {
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
|
||||
|
||||
static inline void sec_add_to_list(struct sec_dev *sec)
|
||||
{
|
||||
mutex_lock(&sec_list_lock);
|
||||
list_add_tail(&sec->list, &sec_list);
|
||||
mutex_unlock(&sec_list_lock);
|
||||
}
|
||||
|
||||
static inline void sec_remove_from_list(struct sec_dev *sec)
|
||||
{
|
||||
mutex_lock(&sec_list_lock);
|
||||
list_del(&sec->list);
|
||||
mutex_unlock(&sec_list_lock);
|
||||
}
|
||||
|
||||
static u8 sec_get_endian(struct sec_dev *sec)
|
||||
{
|
||||
struct hisi_qm *qm = &sec->qm;
|
||||
@ -384,9 +365,8 @@ static void sec_debug_regs_clear(struct hisi_qm *qm)
|
||||
hisi_qm_debug_regs_clear(qm);
|
||||
}
|
||||
|
||||
static void sec_hw_error_enable(struct sec_dev *sec)
|
||||
static void sec_hw_error_enable(struct hisi_qm *qm)
|
||||
{
|
||||
struct hisi_qm *qm = &sec->qm;
|
||||
u32 val;
|
||||
|
||||
if (qm->ver == QM_HW_V1) {
|
||||
@ -414,9 +394,8 @@ static void sec_hw_error_enable(struct sec_dev *sec)
|
||||
writel(val, qm->io_base + SEC_CONTROL_REG);
|
||||
}
|
||||
|
||||
static void sec_hw_error_disable(struct sec_dev *sec)
|
||||
static void sec_hw_error_disable(struct hisi_qm *qm)
|
||||
{
|
||||
struct hisi_qm *qm = &sec->qm;
|
||||
u32 val;
|
||||
|
||||
val = readl(qm->io_base + SEC_CONTROL_REG);
|
||||
@ -435,27 +414,6 @@ static void sec_hw_error_disable(struct sec_dev *sec)
|
||||
writel(val, qm->io_base + SEC_CONTROL_REG);
|
||||
}
|
||||
|
||||
static void sec_hw_error_init(struct sec_dev *sec)
|
||||
{
|
||||
if (sec->qm.fun_type == QM_HW_VF)
|
||||
return;
|
||||
|
||||
hisi_qm_hw_error_init(&sec->qm, QM_BASE_CE,
|
||||
QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
|
||||
| QM_ACC_WB_NOT_READY_TIMEOUT, 0,
|
||||
QM_DB_RANDOM_INVALID);
|
||||
sec_hw_error_enable(sec);
|
||||
}
|
||||
|
||||
static void sec_hw_error_uninit(struct sec_dev *sec)
|
||||
{
|
||||
if (sec->qm.fun_type == QM_HW_VF)
|
||||
return;
|
||||
|
||||
sec_hw_error_disable(sec);
|
||||
writel(GENMASK(12, 0), sec->qm.io_base + SEC_QM_ABNORMAL_INT_MASK);
|
||||
}
|
||||
|
||||
static u32 sec_current_qm_read(struct sec_debug_file *file)
|
||||
{
|
||||
struct hisi_qm *qm = file->qm;
|
||||
@ -695,6 +653,51 @@ static void sec_debugfs_exit(struct sec_dev *sec)
|
||||
debugfs_remove_recursive(sec->qm.debug.debug_root);
|
||||
}
|
||||
|
||||
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
|
||||
{
|
||||
const struct sec_hw_error *errs = sec_hw_errors;
|
||||
struct device *dev = &qm->pdev->dev;
|
||||
u32 err_val;
|
||||
|
||||
while (errs->msg) {
|
||||
if (errs->int_msk & err_sts) {
|
||||
dev_err(dev, "%s [error status=0x%x] found\n",
|
||||
errs->msg, errs->int_msk);
|
||||
|
||||
if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
|
||||
err_val = readl(qm->io_base +
|
||||
SEC_CORE_SRAM_ECC_ERR_INFO);
|
||||
dev_err(dev, "multi ecc sram num=0x%x\n",
|
||||
SEC_ECC_NUM(err_val));
|
||||
dev_err(dev, "multi ecc sram addr=0x%x\n",
|
||||
SEC_ECC_ADDR(err_val));
|
||||
}
|
||||
}
|
||||
errs++;
|
||||
}
|
||||
|
||||
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
|
||||
}
|
||||
|
||||
static u32 sec_get_hw_err_status(struct hisi_qm *qm)
|
||||
{
|
||||
return readl(qm->io_base + SEC_CORE_INT_STATUS);
|
||||
}
|
||||
|
||||
static const struct hisi_qm_err_ini sec_err_ini = {
|
||||
.hw_err_enable = sec_hw_error_enable,
|
||||
.hw_err_disable = sec_hw_error_disable,
|
||||
.get_dev_hw_err_status = sec_get_hw_err_status,
|
||||
.log_dev_hw_err = sec_log_hw_error,
|
||||
.err_info = {
|
||||
.ce = QM_BASE_CE,
|
||||
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
|
||||
QM_ACC_WB_NOT_READY_TIMEOUT,
|
||||
.fe = 0,
|
||||
.msi = QM_DB_RANDOM_INVALID,
|
||||
}
|
||||
};
|
||||
|
||||
static int sec_pf_probe_init(struct sec_dev *sec)
|
||||
{
|
||||
struct hisi_qm *qm = &sec->qm;
|
||||
@ -713,11 +716,13 @@ static int sec_pf_probe_init(struct sec_dev *sec)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
qm->err_ini = &sec_err_ini;
|
||||
|
||||
ret = sec_set_user_domain_and_cache(sec);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
sec_hw_error_init(sec);
|
||||
hisi_qm_dev_err_init(qm);
|
||||
sec_debug_regs_clear(qm);
|
||||
|
||||
return 0;
|
||||
@ -750,12 +755,30 @@ static void sec_qm_uninit(struct hisi_qm *qm)
|
||||
|
||||
static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* WQ_HIGHPRI: SEC request must be low delayed,
|
||||
* so need a high priority workqueue.
|
||||
* WQ_UNBOUND: SEC task is likely with long
|
||||
* running CPU intensive workloads.
|
||||
*/
|
||||
qm->wq = alloc_workqueue("%s", WQ_HIGHPRI |
|
||||
WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
|
||||
pci_name(qm->pdev));
|
||||
if (!qm->wq) {
|
||||
pci_err(qm->pdev, "fail to alloc workqueue\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (qm->fun_type == QM_HW_PF) {
|
||||
qm->qp_base = SEC_PF_DEF_Q_BASE;
|
||||
qm->qp_num = pf_q_num;
|
||||
qm->debug.curr_qm_qp_num = pf_q_num;
|
||||
|
||||
return sec_pf_probe_init(sec);
|
||||
ret = sec_pf_probe_init(sec);
|
||||
if (ret)
|
||||
goto err_probe_uninit;
|
||||
} else if (qm->fun_type == QM_HW_VF) {
|
||||
/*
|
||||
* have no way to get qm configure in VM in v1 hardware,
|
||||
@ -768,18 +791,43 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
|
||||
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
|
||||
} else if (qm->ver == QM_HW_V2) {
|
||||
/* v2 starts to support get vft by mailbox */
|
||||
return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
|
||||
ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
|
||||
if (ret)
|
||||
goto err_probe_uninit;
|
||||
}
|
||||
} else {
|
||||
return -ENODEV;
|
||||
ret = -ENODEV;
|
||||
goto err_probe_uninit;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_probe_uninit:
|
||||
destroy_workqueue(qm->wq);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void sec_probe_uninit(struct sec_dev *sec)
|
||||
static void sec_probe_uninit(struct hisi_qm *qm)
|
||||
{
|
||||
sec_hw_error_uninit(sec);
|
||||
hisi_qm_dev_err_uninit(qm);
|
||||
|
||||
destroy_workqueue(qm->wq);
|
||||
}
|
||||
|
||||
static void sec_iommu_used_check(struct sec_dev *sec)
|
||||
{
|
||||
struct iommu_domain *domain;
|
||||
struct device *dev = &sec->qm.pdev->dev;
|
||||
|
||||
domain = iommu_get_domain_for_dev(dev);
|
||||
|
||||
/* Check if iommu is used */
|
||||
sec->iommu_used = false;
|
||||
if (domain) {
|
||||
if (domain->type & __IOMMU_DOMAIN_PAGING)
|
||||
sec->iommu_used = true;
|
||||
dev_info(dev, "SMMU Opened, the iommu type = %u\n",
|
||||
domain->type);
|
||||
}
|
||||
}
|
||||
|
||||
static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
@ -795,6 +843,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
pci_set_drvdata(pdev, sec);
|
||||
|
||||
sec->ctx_q_num = ctx_q_num;
|
||||
sec_iommu_used_check(sec);
|
||||
|
||||
qm = &sec->qm;
|
||||
|
||||
@ -820,7 +869,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
if (ret)
|
||||
pci_warn(pdev, "Failed to init debugfs!\n");
|
||||
|
||||
sec_add_to_list(sec);
|
||||
hisi_qm_add_to_list(qm, &sec_devices);
|
||||
|
||||
ret = sec_register_to_crypto();
|
||||
if (ret < 0) {
|
||||
@ -831,12 +880,12 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
return 0;
|
||||
|
||||
err_remove_from_list:
|
||||
sec_remove_from_list(sec);
|
||||
hisi_qm_del_from_list(qm, &sec_devices);
|
||||
sec_debugfs_exit(sec);
|
||||
hisi_qm_stop(qm);
|
||||
|
||||
err_probe_uninit:
|
||||
sec_probe_uninit(sec);
|
||||
sec_probe_uninit(qm);
|
||||
|
||||
err_qm_uninit:
|
||||
sec_qm_uninit(qm);
|
||||
@ -955,7 +1004,7 @@ static void sec_remove(struct pci_dev *pdev)
|
||||
|
||||
sec_unregister_from_crypto();
|
||||
|
||||
sec_remove_from_list(sec);
|
||||
hisi_qm_del_from_list(qm, &sec_devices);
|
||||
|
||||
if (qm->fun_type == QM_HW_PF && sec->num_vfs)
|
||||
(void)sec_sriov_disable(pdev);
|
||||
@ -967,89 +1016,13 @@ static void sec_remove(struct pci_dev *pdev)
|
||||
if (qm->fun_type == QM_HW_PF)
|
||||
sec_debug_regs_clear(qm);
|
||||
|
||||
sec_probe_uninit(sec);
|
||||
sec_probe_uninit(qm);
|
||||
|
||||
sec_qm_uninit(qm);
|
||||
}
|
||||
|
||||
static void sec_log_hw_error(struct sec_dev *sec, u32 err_sts)
|
||||
{
|
||||
const struct sec_hw_error *errs = sec_hw_errors;
|
||||
struct device *dev = &sec->qm.pdev->dev;
|
||||
u32 err_val;
|
||||
|
||||
while (errs->msg) {
|
||||
if (errs->int_msk & err_sts) {
|
||||
dev_err(dev, "%s [error status=0x%x] found\n",
|
||||
errs->msg, errs->int_msk);
|
||||
|
||||
if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
|
||||
err_val = readl(sec->qm.io_base +
|
||||
SEC_CORE_SRAM_ECC_ERR_INFO);
|
||||
dev_err(dev, "multi ecc sram num=0x%x\n",
|
||||
SEC_ECC_NUM(err_val));
|
||||
dev_err(dev, "multi ecc sram addr=0x%x\n",
|
||||
SEC_ECC_ADDR(err_val));
|
||||
}
|
||||
}
|
||||
errs++;
|
||||
}
|
||||
}
|
||||
|
||||
static pci_ers_result_t sec_hw_error_handle(struct sec_dev *sec)
|
||||
{
|
||||
u32 err_sts;
|
||||
|
||||
/* read err sts */
|
||||
err_sts = readl(sec->qm.io_base + SEC_CORE_INT_STATUS);
|
||||
if (err_sts) {
|
||||
sec_log_hw_error(sec, err_sts);
|
||||
|
||||
/* clear error interrupts */
|
||||
writel(err_sts, sec->qm.io_base + SEC_CORE_INT_SOURCE);
|
||||
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
}
|
||||
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
static pci_ers_result_t sec_process_hw_error(struct pci_dev *pdev)
|
||||
{
|
||||
struct sec_dev *sec = pci_get_drvdata(pdev);
|
||||
pci_ers_result_t qm_ret, sec_ret;
|
||||
|
||||
if (!sec) {
|
||||
pci_err(pdev, "Can't recover error during device init\n");
|
||||
return PCI_ERS_RESULT_NONE;
|
||||
}
|
||||
|
||||
/* log qm error */
|
||||
qm_ret = hisi_qm_hw_error_handle(&sec->qm);
|
||||
|
||||
/* log sec error */
|
||||
sec_ret = sec_hw_error_handle(sec);
|
||||
|
||||
return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
|
||||
sec_ret == PCI_ERS_RESULT_NEED_RESET) ?
|
||||
PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
static pci_ers_result_t sec_error_detected(struct pci_dev *pdev,
|
||||
pci_channel_state_t state)
|
||||
{
|
||||
if (pdev->is_virtfn)
|
||||
return PCI_ERS_RESULT_NONE;
|
||||
|
||||
pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
|
||||
if (state == pci_channel_io_perm_failure)
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
||||
return sec_process_hw_error(pdev);
|
||||
}
|
||||
|
||||
static const struct pci_error_handlers sec_err_handler = {
|
||||
.error_detected = sec_error_detected,
|
||||
.error_detected = hisi_qm_dev_err_detected,
|
||||
};
|
||||
|
||||
static struct pci_driver sec_pci_driver = {
|
||||
@ -1078,6 +1051,7 @@ static int __init sec_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
hisi_qm_init_list(&sec_devices);
|
||||
sec_register_debugfs();
|
||||
|
||||
ret = pci_register_driver(&sec_pci_driver);
|
||||
|
@ -68,7 +68,7 @@ struct hisi_zip_sqe {
|
||||
u32 rsvd1[4];
|
||||
};
|
||||
|
||||
struct hisi_zip *find_zip_device(int node);
|
||||
int zip_create_qps(struct hisi_qp **qps, int ctx_num);
|
||||
int hisi_zip_register_to_crypto(void);
|
||||
void hisi_zip_unregister_from_crypto(void);
|
||||
#endif
|
||||
|
@ -132,29 +132,25 @@ static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
|
||||
sqe->dest_addr_h = upper_32_bits(d_addr);
|
||||
}
|
||||
|
||||
static int hisi_zip_create_qp(struct hisi_qm *qm, struct hisi_zip_qp_ctx *ctx,
|
||||
int alg_type, int req_type)
|
||||
static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
|
||||
int alg_type, int req_type)
|
||||
{
|
||||
struct hisi_qp *qp;
|
||||
struct device *dev = &qp->qm->pdev->dev;
|
||||
int ret;
|
||||
|
||||
qp = hisi_qm_create_qp(qm, alg_type);
|
||||
if (IS_ERR(qp))
|
||||
return PTR_ERR(qp);
|
||||
|
||||
qp->req_type = req_type;
|
||||
qp->alg_type = alg_type;
|
||||
qp->qp_ctx = ctx;
|
||||
ctx->qp = qp;
|
||||
|
||||
ret = hisi_qm_start_qp(qp, 0);
|
||||
if (ret < 0)
|
||||
goto err_release_qp;
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "start qp failed!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ctx->qp = qp;
|
||||
|
||||
return 0;
|
||||
|
||||
err_release_qp:
|
||||
hisi_qm_release_qp(qp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
|
||||
@ -165,34 +161,34 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
|
||||
|
||||
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type)
|
||||
{
|
||||
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
|
||||
struct hisi_zip *hisi_zip;
|
||||
struct hisi_qm *qm;
|
||||
int ret, i, j;
|
||||
|
||||
/* find the proper zip device */
|
||||
hisi_zip = find_zip_device(cpu_to_node(smp_processor_id()));
|
||||
if (!hisi_zip) {
|
||||
pr_err("Failed to find a proper ZIP device!\n");
|
||||
ret = zip_create_qps(qps, HZIP_CTX_Q_NUM);
|
||||
if (ret) {
|
||||
pr_err("Can not create zip qps!\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
qm = &hisi_zip->qm;
|
||||
|
||||
hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);
|
||||
|
||||
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
|
||||
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
|
||||
ret = hisi_zip_create_qp(qm, &hisi_zip_ctx->qp_ctx[i], i,
|
||||
req_type);
|
||||
if (ret)
|
||||
goto err;
|
||||
ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
|
||||
req_type);
|
||||
if (ret) {
|
||||
for (j = i - 1; j >= 0; j--)
|
||||
hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
|
||||
|
||||
hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
|
||||
return ret;
|
||||
}
|
||||
|
||||
hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err:
|
||||
for (j = i - 1; j >= 0; j--)
|
||||
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[j]);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/pci.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/topology.h>
|
||||
#include <linux/uacce.h>
|
||||
#include "zip.h"
|
||||
|
||||
#define PCI_DEVICE_ID_ZIP_PF 0xa250
|
||||
@ -60,13 +61,17 @@
|
||||
#define HZIP_CORE_DEBUG_DECOMP_5 0x309000
|
||||
|
||||
#define HZIP_CORE_INT_SOURCE 0x3010A0
|
||||
#define HZIP_CORE_INT_MASK 0x3010A4
|
||||
#define HZIP_CORE_INT_MASK_REG 0x3010A4
|
||||
#define HZIP_CORE_INT_STATUS 0x3010AC
|
||||
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
|
||||
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
|
||||
#define SRAM_ECC_ERR_NUM_SHIFT 16
|
||||
#define SRAM_ECC_ERR_ADDR_SHIFT 24
|
||||
#define HZIP_CORE_INT_DISABLE 0x000007FF
|
||||
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
|
||||
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
|
||||
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
|
||||
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
|
||||
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
|
||||
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
|
||||
#define HZIP_CORE_INT_MASK_ALL GENMASK(10, 0)
|
||||
#define HZIP_COMP_CORE_NUM 2
|
||||
#define HZIP_DECOMP_CORE_NUM 6
|
||||
#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
|
||||
@ -83,77 +88,7 @@
|
||||
|
||||
static const char hisi_zip_name[] = "hisi_zip";
|
||||
static struct dentry *hzip_debugfs_root;
|
||||
static LIST_HEAD(hisi_zip_list);
|
||||
static DEFINE_MUTEX(hisi_zip_list_lock);
|
||||
|
||||
struct hisi_zip_resource {
|
||||
struct hisi_zip *hzip;
|
||||
int distance;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
static void free_list(struct list_head *head)
|
||||
{
|
||||
struct hisi_zip_resource *res, *tmp;
|
||||
|
||||
list_for_each_entry_safe(res, tmp, head, list) {
|
||||
list_del(&res->list);
|
||||
kfree(res);
|
||||
}
|
||||
}
|
||||
|
||||
struct hisi_zip *find_zip_device(int node)
|
||||
{
|
||||
struct hisi_zip_resource *res, *tmp;
|
||||
struct hisi_zip *ret = NULL;
|
||||
struct hisi_zip *hisi_zip;
|
||||
struct list_head *n;
|
||||
struct device *dev;
|
||||
LIST_HEAD(head);
|
||||
|
||||
mutex_lock(&hisi_zip_list_lock);
|
||||
|
||||
if (IS_ENABLED(CONFIG_NUMA)) {
|
||||
list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
|
||||
res = kzalloc(sizeof(*res), GFP_KERNEL);
|
||||
if (!res)
|
||||
goto err;
|
||||
|
||||
dev = &hisi_zip->qm.pdev->dev;
|
||||
res->hzip = hisi_zip;
|
||||
res->distance = node_distance(dev_to_node(dev), node);
|
||||
|
||||
n = &head;
|
||||
list_for_each_entry(tmp, &head, list) {
|
||||
if (res->distance < tmp->distance) {
|
||||
n = &tmp->list;
|
||||
break;
|
||||
}
|
||||
}
|
||||
list_add_tail(&res->list, n);
|
||||
}
|
||||
|
||||
list_for_each_entry(tmp, &head, list) {
|
||||
if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
|
||||
ret = tmp->hzip;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
free_list(&head);
|
||||
} else {
|
||||
ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
|
||||
}
|
||||
|
||||
mutex_unlock(&hisi_zip_list_lock);
|
||||
|
||||
return ret;
|
||||
|
||||
err:
|
||||
free_list(&head);
|
||||
mutex_unlock(&hisi_zip_list_lock);
|
||||
return NULL;
|
||||
}
|
||||
static struct hisi_qm_list zip_devices;
|
||||
|
||||
struct hisi_zip_hw_error {
|
||||
u32 int_msk;
|
||||
@ -297,9 +232,6 @@ static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
|
||||
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
|
||||
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
|
||||
|
||||
static int uacce_mode;
|
||||
module_param(uacce_mode, int, 0);
|
||||
|
||||
static u32 vfs_num;
|
||||
module_param(vfs_num, uint, 0444);
|
||||
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)");
|
||||
@ -311,18 +243,11 @@ static const struct pci_device_id hisi_zip_dev_ids[] = {
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
|
||||
|
||||
static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
|
||||
int zip_create_qps(struct hisi_qp **qps, int qp_num)
|
||||
{
|
||||
mutex_lock(&hisi_zip_list_lock);
|
||||
list_add_tail(&hisi_zip->list, &hisi_zip_list);
|
||||
mutex_unlock(&hisi_zip_list_lock);
|
||||
}
|
||||
int node = cpu_to_node(smp_processor_id());
|
||||
|
||||
static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
|
||||
{
|
||||
mutex_lock(&hisi_zip_list_lock);
|
||||
list_del(&hisi_zip->list);
|
||||
mutex_unlock(&hisi_zip_list_lock);
|
||||
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
|
||||
}
|
||||
|
||||
static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
|
||||
@ -353,8 +278,14 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
|
||||
writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
|
||||
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
|
||||
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
|
||||
writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
|
||||
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
|
||||
|
||||
if (hisi_zip->qm.use_sva) {
|
||||
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
|
||||
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
|
||||
} else {
|
||||
writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
|
||||
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
|
||||
}
|
||||
|
||||
/* let's open all compression/decompression cores */
|
||||
writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
|
||||
@ -366,27 +297,32 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
|
||||
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
|
||||
}
|
||||
|
||||
static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
|
||||
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
|
||||
{
|
||||
struct hisi_qm *qm = &hisi_zip->qm;
|
||||
|
||||
if (qm->ver == QM_HW_V1) {
|
||||
writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
|
||||
writel(HZIP_CORE_INT_MASK_ALL,
|
||||
qm->io_base + HZIP_CORE_INT_MASK_REG);
|
||||
dev_info(&qm->pdev->dev, "Does not support hw error handle\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (state) {
|
||||
/* clear ZIP hw error source if having */
|
||||
writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base +
|
||||
HZIP_CORE_INT_SOURCE);
|
||||
/* enable ZIP hw error interrupts */
|
||||
writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
|
||||
} else {
|
||||
/* disable ZIP hw error interrupts */
|
||||
writel(HZIP_CORE_INT_DISABLE,
|
||||
hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
|
||||
}
|
||||
/* clear ZIP hw error source if having */
|
||||
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
|
||||
|
||||
/* configure error type */
|
||||
writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
|
||||
writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
|
||||
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
|
||||
qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
|
||||
|
||||
/* enable ZIP hw error interrupts */
|
||||
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
|
||||
}
|
||||
|
||||
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
|
||||
{
|
||||
/* disable ZIP hw error interrupts */
|
||||
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
|
||||
}
|
||||
|
||||
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
|
||||
@ -638,14 +574,53 @@ static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
|
||||
hisi_zip_debug_regs_clear(hisi_zip);
|
||||
}
|
||||
|
||||
static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip)
|
||||
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
|
||||
{
|
||||
hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE,
|
||||
QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
|
||||
QM_DB_RANDOM_INVALID);
|
||||
hisi_zip_hw_error_set_state(hisi_zip, true);
|
||||
const struct hisi_zip_hw_error *err = zip_hw_error;
|
||||
struct device *dev = &qm->pdev->dev;
|
||||
u32 err_val;
|
||||
|
||||
while (err->msg) {
|
||||
if (err->int_msk & err_sts) {
|
||||
dev_err(dev, "%s [error status=0x%x] found\n",
|
||||
err->msg, err->int_msk);
|
||||
|
||||
if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
|
||||
err_val = readl(qm->io_base +
|
||||
HZIP_CORE_SRAM_ECC_ERR_INFO);
|
||||
dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
|
||||
((err_val >>
|
||||
HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
|
||||
dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n",
|
||||
(err_val >>
|
||||
HZIP_SRAM_ECC_ERR_ADDR_SHIFT));
|
||||
}
|
||||
}
|
||||
err++;
|
||||
}
|
||||
|
||||
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
|
||||
}
|
||||
|
||||
static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
|
||||
{
|
||||
return readl(qm->io_base + HZIP_CORE_INT_STATUS);
|
||||
}
|
||||
|
||||
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
|
||||
.hw_err_enable = hisi_zip_hw_error_enable,
|
||||
.hw_err_disable = hisi_zip_hw_error_disable,
|
||||
.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
|
||||
.log_dev_hw_err = hisi_zip_log_hw_error,
|
||||
.err_info = {
|
||||
.ce = QM_BASE_CE,
|
||||
.nfe = QM_BASE_NFE |
|
||||
QM_ACC_WB_NOT_READY_TIMEOUT,
|
||||
.fe = 0,
|
||||
.msi = QM_DB_RANDOM_INVALID,
|
||||
}
|
||||
};
|
||||
|
||||
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
|
||||
{
|
||||
struct hisi_qm *qm = &hisi_zip->qm;
|
||||
@ -671,8 +646,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
qm->err_ini = &hisi_zip_err_ini;
|
||||
|
||||
hisi_zip_set_user_domain_and_cache(hisi_zip);
|
||||
hisi_zip_hw_error_init(hisi_zip);
|
||||
hisi_qm_dev_err_init(qm);
|
||||
hisi_zip_debug_regs_clear(hisi_zip);
|
||||
|
||||
return 0;
|
||||
@ -791,27 +768,15 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
pci_set_drvdata(pdev, hisi_zip);
|
||||
|
||||
qm = &hisi_zip->qm;
|
||||
qm->use_dma_api = true;
|
||||
qm->pdev = pdev;
|
||||
qm->ver = rev_id;
|
||||
|
||||
qm->algs = "zlib\ngzip";
|
||||
qm->sqe_size = HZIP_SQE_SIZE;
|
||||
qm->dev_name = hisi_zip_name;
|
||||
qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
|
||||
QM_HW_VF;
|
||||
switch (uacce_mode) {
|
||||
case 0:
|
||||
qm->use_dma_api = true;
|
||||
break;
|
||||
case 1:
|
||||
qm->use_dma_api = false;
|
||||
break;
|
||||
case 2:
|
||||
qm->use_dma_api = true;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = hisi_qm_init(qm);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Failed to init qm!\n");
|
||||
@ -849,7 +814,13 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
if (ret)
|
||||
dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
|
||||
|
||||
hisi_zip_add_to_list(hisi_zip);
|
||||
hisi_qm_add_to_list(qm, &zip_devices);
|
||||
|
||||
if (qm->uacce) {
|
||||
ret = uacce_register(qm->uacce);
|
||||
if (ret)
|
||||
goto err_qm_uninit;
|
||||
}
|
||||
|
||||
if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
|
||||
ret = hisi_zip_sriov_enable(pdev, vfs_num);
|
||||
@ -860,7 +831,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
return 0;
|
||||
|
||||
err_remove_from_list:
|
||||
hisi_zip_remove_from_list(hisi_zip);
|
||||
hisi_qm_del_from_list(qm, &zip_devices);
|
||||
hisi_zip_debugfs_exit(hisi_zip);
|
||||
hisi_qm_stop(qm);
|
||||
err_qm_uninit:
|
||||
@ -887,92 +858,13 @@ static void hisi_zip_remove(struct pci_dev *pdev)
|
||||
hisi_zip_debugfs_exit(hisi_zip);
|
||||
hisi_qm_stop(qm);
|
||||
|
||||
if (qm->fun_type == QM_HW_PF)
|
||||
hisi_zip_hw_error_set_state(hisi_zip, false);
|
||||
|
||||
hisi_qm_dev_err_uninit(qm);
|
||||
hisi_qm_uninit(qm);
|
||||
hisi_zip_remove_from_list(hisi_zip);
|
||||
}
|
||||
|
||||
static void hisi_zip_log_hw_error(struct hisi_zip *hisi_zip, u32 err_sts)
|
||||
{
|
||||
const struct hisi_zip_hw_error *err = zip_hw_error;
|
||||
struct device *dev = &hisi_zip->qm.pdev->dev;
|
||||
u32 err_val;
|
||||
|
||||
while (err->msg) {
|
||||
if (err->int_msk & err_sts) {
|
||||
dev_warn(dev, "%s [error status=0x%x] found\n",
|
||||
err->msg, err->int_msk);
|
||||
|
||||
if (HZIP_CORE_INT_STATUS_M_ECC & err->int_msk) {
|
||||
err_val = readl(hisi_zip->qm.io_base +
|
||||
HZIP_CORE_SRAM_ECC_ERR_INFO);
|
||||
dev_warn(dev, "hisi-zip multi ecc sram num=0x%x\n",
|
||||
((err_val >> SRAM_ECC_ERR_NUM_SHIFT) &
|
||||
0xFF));
|
||||
dev_warn(dev, "hisi-zip multi ecc sram addr=0x%x\n",
|
||||
(err_val >> SRAM_ECC_ERR_ADDR_SHIFT));
|
||||
}
|
||||
}
|
||||
err++;
|
||||
}
|
||||
}
|
||||
|
||||
static pci_ers_result_t hisi_zip_hw_error_handle(struct hisi_zip *hisi_zip)
|
||||
{
|
||||
u32 err_sts;
|
||||
|
||||
/* read err sts */
|
||||
err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS);
|
||||
|
||||
if (err_sts) {
|
||||
hisi_zip_log_hw_error(hisi_zip, err_sts);
|
||||
/* clear error interrupts */
|
||||
writel(err_sts, hisi_zip->qm.io_base + HZIP_CORE_INT_SOURCE);
|
||||
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
}
|
||||
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
static pci_ers_result_t hisi_zip_process_hw_error(struct pci_dev *pdev)
|
||||
{
|
||||
struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
|
||||
struct device *dev = &pdev->dev;
|
||||
pci_ers_result_t qm_ret, zip_ret;
|
||||
|
||||
if (!hisi_zip) {
|
||||
dev_err(dev,
|
||||
"Can't recover ZIP-error occurred during device init\n");
|
||||
return PCI_ERS_RESULT_NONE;
|
||||
}
|
||||
|
||||
qm_ret = hisi_qm_hw_error_handle(&hisi_zip->qm);
|
||||
|
||||
zip_ret = hisi_zip_hw_error_handle(hisi_zip);
|
||||
|
||||
return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
|
||||
zip_ret == PCI_ERS_RESULT_NEED_RESET) ?
|
||||
PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev,
|
||||
pci_channel_state_t state)
|
||||
{
|
||||
if (pdev->is_virtfn)
|
||||
return PCI_ERS_RESULT_NONE;
|
||||
|
||||
dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
|
||||
if (state == pci_channel_io_perm_failure)
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
||||
return hisi_zip_process_hw_error(pdev);
|
||||
hisi_qm_del_from_list(qm, &zip_devices);
|
||||
}
|
||||
|
||||
static const struct pci_error_handlers hisi_zip_err_handler = {
|
||||
.error_detected = hisi_zip_error_detected,
|
||||
.error_detected = hisi_qm_dev_err_detected,
|
||||
};
|
||||
|
||||
static struct pci_driver hisi_zip_pci_driver = {
|
||||
@ -1002,6 +894,7 @@ static int __init hisi_zip_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
hisi_qm_init_list(&zip_devices);
|
||||
hisi_zip_register_debugfs();
|
||||
|
||||
ret = pci_register_driver(&hisi_zip_pci_driver);
|
||||
@ -1010,12 +903,10 @@ static int __init hisi_zip_init(void)
|
||||
goto err_pci;
|
||||
}
|
||||
|
||||
if (uacce_mode == 0 || uacce_mode == 2) {
|
||||
ret = hisi_zip_register_to_crypto();
|
||||
if (ret < 0) {
|
||||
pr_err("Failed to register driver to crypto.\n");
|
||||
goto err_crypto;
|
||||
}
|
||||
ret = hisi_zip_register_to_crypto();
|
||||
if (ret < 0) {
|
||||
pr_err("Failed to register driver to crypto.\n");
|
||||
goto err_crypto;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1030,8 +921,7 @@ err_pci:
|
||||
|
||||
static void __exit hisi_zip_exit(void)
|
||||
{
|
||||
if (uacce_mode == 0 || uacce_mode == 2)
|
||||
hisi_zip_unregister_from_crypto();
|
||||
hisi_zip_unregister_from_crypto();
|
||||
pci_unregister_driver(&hisi_zip_pci_driver);
|
||||
hisi_zip_unregister_debugfs();
|
||||
}
|
||||
|
@ -103,7 +103,7 @@ struct img_hash_request_ctx {
|
||||
struct ahash_request fallback_req;
|
||||
|
||||
/* Zero length buffer must remain last member of struct */
|
||||
u8 buffer[0] __aligned(sizeof(u32));
|
||||
u8 buffer[] __aligned(sizeof(u32));
|
||||
};
|
||||
|
||||
struct img_hash_ctx {
|
||||
|
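Not part of the diff: the img-hash hunk above converts a zero-length array into a C99 flexible array member. A hedged, self-contained sketch of how such a trailing buffer is typically sized with struct_size(); the structure and allocator below are illustrative, not the driver's real allocation path.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_req_ctx {
	unsigned int buf_len;
	/* Flexible array member must remain the last field of the struct. */
	u8 buffer[] __aligned(sizeof(u32));
};

static struct demo_req_ctx *demo_alloc(unsigned int buf_len)
{
	/* struct_size() adds buf_len bytes to sizeof(*ctx), overflow-checked. */
	struct demo_req_ctx *ctx = kzalloc(struct_size(ctx, buffer, buf_len),
					   GFP_KERNEL);

	if (ctx)
		ctx->buf_len = buf_len;

	return ctx;
}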
37
drivers/crypto/marvell/Kconfig
Normal file
@ -0,0 +1,37 @@
|
||||
#
|
||||
# Marvell crypto drivers configuration
|
||||
#
|
||||
|
||||
config CRYPTO_DEV_MARVELL
|
||||
tristate
|
||||
|
||||
config CRYPTO_DEV_MARVELL_CESA
|
||||
tristate "Marvell's Cryptographic Engine driver"
|
||||
depends on PLAT_ORION || ARCH_MVEBU
|
||||
select CRYPTO_LIB_AES
|
||||
select CRYPTO_LIB_DES
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_HASH
|
||||
select SRAM
|
||||
select CRYPTO_DEV_MARVELL
|
||||
help
|
||||
This driver allows you to utilize the Cryptographic Engines and
|
||||
Security Accelerator (CESA) which can be found on MVEBU and ORION
|
||||
platforms.
|
||||
This driver supports CPU offload through DMA transfers.
|
||||
|
||||
config CRYPTO_DEV_OCTEONTX_CPT
|
||||
tristate "Support for Marvell OcteonTX CPT driver"
|
||||
depends on ARCH_THUNDER || COMPILE_TEST
|
||||
depends on PCI_MSI && 64BIT
|
||||
depends on CRYPTO_LIB_AES
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_DEV_MARVELL
|
||||
help
|
||||
This driver allows you to utilize the Marvell Cryptographic
|
||||
Accelerator Unit(CPT) found in OcteonTX series of processors.
|
||||
|
||||
To compile this driver as module, choose M here:
|
||||
the modules will be called octeontx-cpt and octeontx-cptvf
|