blake2s_compress_generic is weakly aliased by blake2s_compress. The
current harness for function selection uses a function pointer, which is
ordinarily inlined and resolved at compile time.

But when Clang's CFI is enabled, CFI still triggers when making an
indirect call via a weak symbol. This seems like a bug in Clang's CFI,
as though it's bucketing weak symbols and strong symbols differently.
It also only seems to trigger when "full LTO" mode is used, rather than
"thin LTO".

[    0.000000][    T0] Kernel panic - not syncing: CFI failure (target: blake2s_compress_generic+0x0/0x1444)
[    0.000000][    T0] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.16.0-mainline-06981-g076c855b846e #1
[    0.000000][    T0] Hardware name: MT6873 (DT)
[    0.000000][    T0] Call trace:
[    0.000000][    T0]  dump_backtrace+0xfc/0x1dc
[    0.000000][    T0]  dump_stack_lvl+0xa8/0x11c
[    0.000000][    T0]  panic+0x194/0x464
[    0.000000][    T0]  __cfi_check_fail+0x54/0x58
[    0.000000][    T0]  __cfi_slowpath_diag+0x354/0x4b0
[    0.000000][    T0]  blake2s_update+0x14c/0x178
[    0.000000][    T0]  _extract_entropy+0xf4/0x29c
[    0.000000][    T0]  crng_initialize_primary+0x24/0x94
[    0.000000][    T0]  rand_initialize+0x2c/0x6c
[    0.000000][    T0]  start_kernel+0x2f8/0x65c
[    0.000000][    T0]  __primary_switched+0xc4/0x7be4
[    0.000000][    T0] Rebooting in 5 seconds..

Nonetheless, the function pointer method isn't so terrific anyway, so
this patch replaces it with a simple boolean, which also gets inlined
away. This successfully works around the Clang bug.

In general, I'm not too keen on all of the indirection involved here;
it clearly does more harm than good. Hopefully the whole thing can get
cleaned up down the road when lib/crypto is overhauled more
comprehensively. But for now, we go with a simple bandaid.

Fixes: 6048fdcc5f26 ("lib/crypto: blake2s: include as built-in")
Link: https://github.com/ClangBuiltLinux/linux/issues/1567
Reported-by: Miles Chen <miles.chen@mediatek.com>
Tested-by: Miles Chen <miles.chen@mediatek.com>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Tested-by: John Stultz <john.stultz@linaro.org>
Acked-by: Nick Desaulniers <ndesaulniers@google.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
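For reference, the weak-alias arrangement the first paragraph refers to is
set up alongside the generic implementation, schematically as in the sketch
below. This is an illustration of the pattern, not a quotation of the kernel
source: architectures that ship an optimized blake2s_compress() provide a
strong definition that overrides the weak alias, and everyone else falls
through to blake2s_compress_generic().

/*
 * Sketch of the weak-alias pattern (illustrative; it belongs in the same
 * translation unit that defines blake2s_compress_generic()).
 */
#include <linux/compiler.h>
#include <crypto/internal/blake2s.h>

void blake2s_compress(struct blake2s_state *state, const u8 *block,
		      size_t nblocks, const u32 inc)
		      __weak __alias(blake2s_compress_generic);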
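The scheme being removed looked roughly like the sketch below (names
approximate, not the exact deleted code): the shared helpers took the
compression routine as a function pointer, so each compression step was an
indirect call. Passing blake2s_compress, which is only a weak alias, is what
Clang's CFI rejected under full LTO. The header that follows keeps the same
helpers but dispatches on a force_generic boolean instead; since every caller
passes a constant, the branch and the boolean are inlined away and only
direct calls remain.

/* Illustrative sketch of the removed function-pointer dispatch. */
#include <linux/compiler.h>
#include <crypto/internal/blake2s.h>

typedef void (*blake2s_compress_t)(struct blake2s_state *state,
				   const u8 *block, size_t nblocks,
				   const u32 inc);

static __always_inline void
__blake2s_compress_one(struct blake2s_state *state,
		       blake2s_compress_t compress)
{
	/*
	 * Indirect call; with compress == blake2s_compress (a weak alias of
	 * blake2s_compress_generic), this is the call that trips Clang's CFI.
	 */
	(*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
}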
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Helper functions for BLAKE2s implementations.
 * Keep this in sync with the corresponding BLAKE2b header.
 */

#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
#define _CRYPTO_INTERNAL_BLAKE2S_H

#include <crypto/blake2s.h>
#include <crypto/internal/hash.h>
#include <linux/string.h>

void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
			      size_t nblocks, const u32 inc);

void blake2s_compress(struct blake2s_state *state, const u8 *block,
		      size_t nblocks, const u32 inc);

bool blake2s_selftest(void);

static inline void blake2s_set_lastblock(struct blake2s_state *state)
{
	state->f[0] = -1;
}

/* Helper functions for BLAKE2s shared by the library and shash APIs */

static __always_inline void
__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
		 bool force_generic)
{
	const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;

	if (unlikely(!inlen))
		return;
	if (inlen > fill) {
		memcpy(state->buf + state->buflen, in, fill);
		if (force_generic)
			blake2s_compress_generic(state, state->buf, 1,
						 BLAKE2S_BLOCK_SIZE);
		else
			blake2s_compress(state, state->buf, 1,
					 BLAKE2S_BLOCK_SIZE);
		state->buflen = 0;
		in += fill;
		inlen -= fill;
	}
	if (inlen > BLAKE2S_BLOCK_SIZE) {
		const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
		/* Hash one less (full) block than strictly possible */
		if (force_generic)
			blake2s_compress_generic(state, in, nblocks - 1,
						 BLAKE2S_BLOCK_SIZE);
		else
			blake2s_compress(state, in, nblocks - 1,
					 BLAKE2S_BLOCK_SIZE);
		in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
		inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
	}
	memcpy(state->buf + state->buflen, in, inlen);
	state->buflen += inlen;
}

static __always_inline void
__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
{
	blake2s_set_lastblock(state);
	memset(state->buf + state->buflen, 0,
	       BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
	if (force_generic)
		blake2s_compress_generic(state, state->buf, 1, state->buflen);
	else
		blake2s_compress(state, state->buf, 1, state->buflen);
	cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
	memcpy(out, state->h, state->outlen);
}

/* Helper functions for shash implementations of BLAKE2s */

struct blake2s_tfm_ctx {
	u8 key[BLAKE2S_KEY_SIZE];
	unsigned int keylen;
};

static inline int crypto_blake2s_setkey(struct crypto_shash *tfm,
					const u8 *key, unsigned int keylen)
{
	struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);

	if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
		return -EINVAL;

	memcpy(tctx->key, key, keylen);
	tctx->keylen = keylen;

	return 0;
}

static inline int crypto_blake2s_init(struct shash_desc *desc)
{
	const struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct blake2s_state *state = shash_desc_ctx(desc);
	unsigned int outlen = crypto_shash_digestsize(desc->tfm);

	__blake2s_init(state, outlen, tctx->key, tctx->keylen);
	return 0;
}

static inline int crypto_blake2s_update(struct shash_desc *desc,
					const u8 *in, unsigned int inlen,
					bool force_generic)
{
	struct blake2s_state *state = shash_desc_ctx(desc);

	__blake2s_update(state, in, inlen, force_generic);
	return 0;
}

static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
				       bool force_generic)
{
	struct blake2s_state *state = shash_desc_ctx(desc);

	__blake2s_final(state, out, force_generic);
	return 0;
}

#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */
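Callers are then expected to pass force_generic as a literal, so after the
__always_inline helpers are expanded the compiler folds the branch into a
direct call and no indirect call survives for CFI to check. A minimal sketch
of the library-side wrappers (along the lines of lib/crypto/blake2s.c, not a
verbatim copy of that file):

#include <crypto/internal/blake2s.h>
#include <linux/export.h>
#include <linux/string.h>

void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
	/* false: use the (possibly arch-optimized) blake2s_compress() */
	__blake2s_update(state, in, inlen, false);
}
EXPORT_SYMBOL(blake2s_update);

void blake2s_final(struct blake2s_state *state, u8 *out)
{
	__blake2s_final(state, out, false);
	/* Wipe the hashing state once the digest has been copied out. */
	memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);

A generic-only shash driver would instead pass true to
crypto_blake2s_update()/crypto_blake2s_final(), forcing
blake2s_compress_generic(); either way the selection is made at compile time
rather than through a function pointer.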