lib/crypto: optimize aes_cmac_128

- We avoid local stack variables so that aes_cmac_128_final() can do a
  lazy cleanup with a single ZERO_STRUCTP(ctx)
- We avoid unneeded memcpy() calls
- We use the optimized aes_block_{xor,lshift}() functions
  (see the sketch below)
- Align the AES_BLOCK_SIZE arrays to 8 bytes
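
A minimal, hypothetical sketch of the idea behind such 64-bit block helpers
(this is not the actual aes_block_xor()/aes_block_lshift() code from
lib/crypto/aes.h; the names block_xor_sketch()/block_lshift_sketch() and the
<endian.h> byte-swap calls are illustration-only assumptions):

#include <stdint.h>
#include <string.h>
#include <endian.h>	/* be64toh()/htobe64() */

#define AES_BLOCK_SIZE 16

/* XOR a 16-byte block as two 64-bit lanes instead of 16 single bytes. */
static inline void block_xor_sketch(const uint8_t in1[AES_BLOCK_SIZE],
				    const uint8_t in2[AES_BLOCK_SIZE],
				    uint8_t out[AES_BLOCK_SIZE])
{
	uint64_t a[2], b[2];

	/* small fixed-size memcpy() calls typically compile to plain
	 * 64-bit loads/stores, especially with 8-byte-aligned buffers */
	memcpy(a, in1, AES_BLOCK_SIZE);
	memcpy(b, in2, AES_BLOCK_SIZE);
	a[0] ^= b[0];
	a[1] ^= b[1];
	memcpy(out, a, AES_BLOCK_SIZE);
}

/* Shift the block left by one bit, treating it as a 128-bit big-endian
 * value, as CMAC subkey generation requires. */
static inline void block_lshift_sketch(const uint8_t in[AES_BLOCK_SIZE],
				       uint8_t out[AES_BLOCK_SIZE])
{
	uint64_t hi, lo;

	memcpy(&hi, in, 8);
	memcpy(&lo, in + 8, 8);
	hi = be64toh(hi);
	lo = be64toh(lo);

	hi = (hi << 1) | (lo >> 63);	/* carry the top bit of the low lane */
	lo <<= 1;

	hi = htobe64(hi);
	lo = htobe64(lo);
	memcpy(out, &hi, 8);
	memcpy(out + 8, &lo, 8);
}

Working on two 8-byte lanes like this is also why the context arrays get
8-byte alignment in the header change below.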

BUG: https://bugzilla.samba.org/show_bug.cgi?id=11451

Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org>
Stefan Metzmacher 2015-08-12 00:59:58 +02:00 committed by Jeremy Allison
parent 0824221b4a
commit 8795ad2030
2 changed files with 25 additions and 85 deletions

lib/crypto/aes_cmac_128.c

@@ -33,92 +33,42 @@ static const uint8_t const_Rb[] = {
 #define _MSB(x) (((x)[0] & 0x80)?1:0)
-static inline void aes_cmac_128_left_shift_1(const uint8_t in[AES_BLOCK_SIZE],
-					     uint8_t out[AES_BLOCK_SIZE])
-{
-	uint8_t overflow = 0;
-	int8_t i;
-	for (i = AES_BLOCK_SIZE - 1; i >= 0; i--) {
-		out[i] = in[i] << 1;
-		out[i] |= overflow;
-		overflow = _MSB(&in[i]);
-	}
-}
-static inline void aes_cmac_128_xor(const uint8_t in1[AES_BLOCK_SIZE],
-				    const uint8_t in2[AES_BLOCK_SIZE],
-				    uint8_t out[AES_BLOCK_SIZE])
-{
-	uint8_t i;
-	for (i = 0; i < AES_BLOCK_SIZE; i++) {
-		out[i] = in1[i] ^ in2[i];
-	}
-}
 void aes_cmac_128_init(struct aes_cmac_128_context *ctx,
 		       const uint8_t K[AES_BLOCK_SIZE])
 {
-	uint8_t L[AES_BLOCK_SIZE];
 	ZERO_STRUCTP(ctx);
 	AES_set_encrypt_key(K, 128, &ctx->aes_key);
 	/* step 1 - generate subkeys k1 and k2 */
-	AES_encrypt(const_Zero, L, &ctx->aes_key);
+	AES_encrypt(const_Zero, ctx->L, &ctx->aes_key);
-	if (_MSB(L) == 0) {
-		aes_cmac_128_left_shift_1(L, ctx->K1);
+	if (_MSB(ctx->L) == 0) {
+		aes_block_lshift(ctx->L, ctx->K1);
 	} else {
-		uint8_t tmp_block[AES_BLOCK_SIZE];
-		aes_cmac_128_left_shift_1(L, tmp_block);
-		aes_cmac_128_xor(tmp_block, const_Rb, ctx->K1);
-		ZERO_STRUCT(tmp_block);
+		aes_block_lshift(ctx->L, ctx->tmp);
+		aes_block_xor(ctx->tmp, const_Rb, ctx->K1);
 	}
 	if (_MSB(ctx->K1) == 0) {
-		aes_cmac_128_left_shift_1(ctx->K1, ctx->K2);
+		aes_block_lshift(ctx->K1, ctx->K2);
 	} else {
-		uint8_t tmp_block[AES_BLOCK_SIZE];
-		aes_cmac_128_left_shift_1(ctx->K1, tmp_block);
-		aes_cmac_128_xor(tmp_block, const_Rb, ctx->K2);
-		ZERO_STRUCT(tmp_block);
+		aes_block_lshift(ctx->K1, ctx->tmp);
+		aes_block_xor(ctx->tmp, const_Rb, ctx->K2);
 	}
-	ZERO_STRUCT(L);
 }
 void aes_cmac_128_update(struct aes_cmac_128_context *ctx,
-			 const uint8_t *_msg, size_t _msg_len)
+			 const uint8_t *msg, size_t msg_len)
 {
-	uint8_t tmp_block[AES_BLOCK_SIZE];
-	uint8_t Y[AES_BLOCK_SIZE];
-	const uint8_t *msg = _msg;
-	size_t msg_len = _msg_len;
-	/*
-	 * copy the remembered last block
-	 */
-	ZERO_STRUCT(tmp_block);
-	if (ctx->last_len) {
-		memcpy(tmp_block, ctx->last, ctx->last_len);
-	}
 	/*
 	 * check if we expand the block
 	 */
 	if (ctx->last_len < AES_BLOCK_SIZE) {
 		size_t len = MIN(AES_BLOCK_SIZE - ctx->last_len, msg_len);
-		memcpy(&tmp_block[ctx->last_len], msg, len);
-		memcpy(ctx->last, tmp_block, AES_BLOCK_SIZE);
+		memcpy(&ctx->last[ctx->last_len], msg, len);
 		msg += len;
 		msg_len -= len;
 		ctx->last_len += len;
@@ -126,59 +76,43 @@ void aes_cmac_128_update(struct aes_cmac_128_context *ctx,
 	if (msg_len == 0) {
 		/* if it is still the last block, we are done */
-		ZERO_STRUCT(tmp_block);
 		return;
 	}
 	/*
 	 * It is not the last block anymore
 	 */
-	ZERO_STRUCT(ctx->last);
 	ctx->last_len = 0;
 	/*
 	 * now checksum everything but the last block
 	 */
-	aes_cmac_128_xor(ctx->X, tmp_block, Y);
-	AES_encrypt(Y, ctx->X, &ctx->aes_key);
+	aes_block_xor(ctx->X, ctx->last, ctx->Y);
+	AES_encrypt(ctx->Y, ctx->X, &ctx->aes_key);
 	while (msg_len > AES_BLOCK_SIZE) {
-		memcpy(tmp_block, msg, AES_BLOCK_SIZE);
+		aes_block_xor(ctx->X, msg, ctx->Y);
+		AES_encrypt(ctx->Y, ctx->X, &ctx->aes_key);
 		msg += AES_BLOCK_SIZE;
 		msg_len -= AES_BLOCK_SIZE;
-		aes_cmac_128_xor(ctx->X, tmp_block, Y);
-		AES_encrypt(Y, ctx->X, &ctx->aes_key);
 	}
 	/*
 	 * copy the last block, it will be processed in
 	 * aes_cmac_128_final().
 	 */
 	ZERO_STRUCT(ctx->last);
 	memcpy(ctx->last, msg, msg_len);
 	ctx->last_len = msg_len;
-	ZERO_STRUCT(tmp_block);
-	ZERO_STRUCT(Y);
 }
 void aes_cmac_128_final(struct aes_cmac_128_context *ctx,
 			uint8_t T[AES_BLOCK_SIZE])
 {
-	uint8_t tmp_block[AES_BLOCK_SIZE];
-	uint8_t Y[AES_BLOCK_SIZE];
 	if (ctx->last_len < AES_BLOCK_SIZE) {
 		ctx->last[ctx->last_len] = 0x80;
-		aes_cmac_128_xor(ctx->last, ctx->K2, tmp_block);
+		aes_block_xor(ctx->last, ctx->K2, ctx->tmp);
 	} else {
-		aes_cmac_128_xor(ctx->last, ctx->K1, tmp_block);
+		aes_block_xor(ctx->last, ctx->K1, ctx->tmp);
 	}
-	aes_cmac_128_xor(tmp_block, ctx->X, Y);
-	AES_encrypt(Y, T, &ctx->aes_key);
+	aes_block_xor(ctx->tmp, ctx->X, ctx->Y);
+	AES_encrypt(ctx->Y, T, &ctx->aes_key);
-	ZERO_STRUCT(tmp_block);
-	ZERO_STRUCT(Y);
+	ZERO_STRUCTP(ctx);
 }

lib/crypto/aes_cmac_128.h

@@ -22,10 +22,16 @@
 struct aes_cmac_128_context {
 	AES_KEY aes_key;
+	uint64_t __align;
 	uint8_t K1[AES_BLOCK_SIZE];
 	uint8_t K2[AES_BLOCK_SIZE];
+	uint8_t L[AES_BLOCK_SIZE];
 	uint8_t X[AES_BLOCK_SIZE];
+	uint8_t Y[AES_BLOCK_SIZE];
+	uint8_t tmp[AES_BLOCK_SIZE];
 	uint8_t last[AES_BLOCK_SIZE];
 	size_t last_len;