crypto: crypto4xx - performance optimizations
This patch provides a cheap 2MiB/s+ (~6%) performance improvement over the
current code. This is because the compiler can now optimize several of the
endian-swap memcpy operations.

Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit a8d79d7bfb (parent 5b0aa2556e)
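For context on why constant-length copies matter, here is a minimal, hedged sketch in standalone C. It is not code from the driver: the helper name and body below are illustrative stand-ins for crypto4xx_memcpy_to_le32()/crypto4xx_memcpy_from_le32(). When the byte-swapping copy is called with a compile-time constant such as AES_IV_SIZE, the compiler can unroll it into a few load/swap/store pairs; a runtime length forces the generic loop.

	/*
	 * Hedged illustration only -- not code from this patch.
	 * swab32_copy() mimics what an endian-swapping copy helper does.
	 * __builtin_bswap32() is a GCC/Clang builtin.
	 */
	#include <stdint.h>
	#include <string.h>

	static inline void swab32_copy(uint32_t *dst, const void *src, size_t len)
	{
		size_t i;

		/* copy 32-bit words, byte-swapping each one */
		for (i = 0; i < len / 4; i++) {
			uint32_t v;

			memcpy(&v, (const uint8_t *)src + i * 4, sizeof(v));
			dst[i] = __builtin_bswap32(v);
		}
	}

	/* constant length (16, like AES_IV_SIZE): the loop can be fully unrolled */
	void copy_iv_const(uint32_t iv[4], const void *info)
	{
		swab32_copy(iv, info, 16);
	}

	/* runtime length: the compiler must keep the loop and the division */
	void copy_iv_runtime(uint32_t *iv, const void *info, size_t ivlen)
	{
		swab32_copy(iv, info, ivlen);
	}

After this patch the ablkcipher entry points pass either 0 or AES_IV_SIZE as a constant, so the fast-path copies fall into the unrollable case.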
@@ -74,32 +74,38 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
 	sa->sa_command_1.bf.copy_hdr = cp_hdr;
 }
 
-int crypto4xx_encrypt(struct ablkcipher_request *req)
+static inline int crypto4xx_crypt(struct ablkcipher_request *req,
+				  const unsigned int ivlen, bool decrypt)
 {
 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-	unsigned int ivlen = crypto_ablkcipher_ivsize(
-		crypto_ablkcipher_reqtfm(req));
 	__le32 iv[ivlen];
 
 	if (ivlen)
 		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
 
 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
-		req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
+		req->nbytes, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
+		ctx->sa_len, 0);
 }
 
-int crypto4xx_decrypt(struct ablkcipher_request *req)
+int crypto4xx_encrypt_noiv(struct ablkcipher_request *req)
 {
-	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-	unsigned int ivlen = crypto_ablkcipher_ivsize(
-		crypto_ablkcipher_reqtfm(req));
-	__le32 iv[ivlen];
+	return crypto4xx_crypt(req, 0, false);
+}
 
-	if (ivlen)
-		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+int crypto4xx_encrypt_iv(struct ablkcipher_request *req)
+{
+	return crypto4xx_crypt(req, AES_IV_SIZE, false);
+}
 
-	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
-		req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
+int crypto4xx_decrypt_noiv(struct ablkcipher_request *req)
+{
+	return crypto4xx_crypt(req, 0, true);
+}
+
+int crypto4xx_decrypt_iv(struct ablkcipher_request *req)
+{
+	return crypto4xx_crypt(req, AES_IV_SIZE, true);
 }
 
 /**
@@ -580,7 +580,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
 	struct scatterlist *dst = pd_uinfo->dest_va;
 	size_t cp_len = crypto_aead_authsize(
 		crypto_aead_reqtfm(aead_req));
-	u32 icv[cp_len];
+	u32 icv[AES_BLOCK_SIZE];
 	int err = 0;
 
 	if (pd_uinfo->using_sd) {
@@ -595,7 +595,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
 	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
 		/* append icv at the end */
 		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
-					   cp_len);
+					   sizeof(icv));
 
 		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
 					 cp_len, 1);
@@ -605,7 +605,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
 					  aead_req->assoclen + aead_req->cryptlen -
 					  cp_len, cp_len, 0);
 
-		crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+		crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
 
 		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
 			err = -EBADMSG;
@@ -1122,8 +1122,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 				.max_keysize = AES_MAX_KEY_SIZE,
 				.ivsize = AES_IV_SIZE,
 				.setkey = crypto4xx_setkey_aes_cbc,
-				.encrypt = crypto4xx_encrypt,
-				.decrypt = crypto4xx_decrypt,
+				.encrypt = crypto4xx_encrypt_iv,
+				.decrypt = crypto4xx_decrypt_iv,
 			}
 		}
 	}},
@@ -1146,8 +1146,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 				.max_keysize = AES_MAX_KEY_SIZE,
 				.ivsize = AES_IV_SIZE,
 				.setkey = crypto4xx_setkey_aes_cfb,
-				.encrypt = crypto4xx_encrypt,
-				.decrypt = crypto4xx_decrypt,
+				.encrypt = crypto4xx_encrypt_iv,
+				.decrypt = crypto4xx_decrypt_iv,
 			}
 		}
 	} },
@@ -1195,8 +1195,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 				.min_keysize = AES_MIN_KEY_SIZE,
 				.max_keysize = AES_MAX_KEY_SIZE,
 				.setkey = crypto4xx_setkey_aes_ecb,
-				.encrypt = crypto4xx_encrypt,
-				.decrypt = crypto4xx_decrypt,
+				.encrypt = crypto4xx_encrypt_noiv,
+				.decrypt = crypto4xx_decrypt_noiv,
 			}
 		}
 	} },
@@ -1219,8 +1219,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 				.max_keysize = AES_MAX_KEY_SIZE,
 				.ivsize = AES_IV_SIZE,
 				.setkey = crypto4xx_setkey_aes_ofb,
-				.encrypt = crypto4xx_encrypt,
-				.decrypt = crypto4xx_decrypt,
+				.encrypt = crypto4xx_encrypt_iv,
+				.decrypt = crypto4xx_decrypt_iv,
 			}
 		}
 	} },
@@ -168,8 +168,10 @@ int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
 			     const u8 *key, unsigned int keylen);
 int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
 			     const u8 *key, unsigned int keylen);
-int crypto4xx_encrypt(struct ablkcipher_request *req);
-int crypto4xx_decrypt(struct ablkcipher_request *req);
+int crypto4xx_encrypt_iv(struct ablkcipher_request *req);
+int crypto4xx_decrypt_iv(struct ablkcipher_request *req);
+int crypto4xx_encrypt_noiv(struct ablkcipher_request *req);
+int crypto4xx_decrypt_noiv(struct ablkcipher_request *req);
 int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
 int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
 int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);