
repart: derive hash partition size from SizeMaxBytes= of data sibling

This change makes it possible for repart to create dm-verity hash
partitions for a custom amount of protected data. When the property
`SizeMaxBytes=` is specified for a dm-verity data partition, the size
of the corresponding hash partition is set to accommodate hash data
for this maximum size, rather than for the actual contents of its
data sibling. However, the contained hash data continues to be
generated from said sibling.

(cherry picked from commit d3032e651e)
Author:    WilliButz
Committer: Luca Boccassi
Date:      2024-10-04 19:51:57 +02:00
Parent:    44e1774660
Commit:    5ea69f9f6d
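
For illustration, a pair of repart.d drop-ins that would exercise the new code path could look like the following sketch (the file names, match key and the 1G limit are made up for this example; it is the SizeMaxBytes= on the data definition that triggers the derived hash size):

    # 50-root.conf (hypothetical)
    [Partition]
    Type=root
    Verity=data
    VerityMatchKey=root
    CopyFiles=/
    SizeMaxBytes=1G

    # 50-root-verity.conf (hypothetical)
    [Partition]
    Type=root-verity
    Verity=hash
    VerityMatchKey=root

With this change, the root-verity partition is sized for the 1G limit rather than for however much data the root partition currently contains.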

@@ -407,6 +407,54 @@ static uint64_t round_up_size(uint64_t v, uint64_t p) {
         return v * p;
 }

+/* calculates the size of a dm-verity hash partition's contents */
+static int calculate_verity_hash_size(
+                uint64_t data_bytes,
+                uint64_t hash_block_size,
+                uint64_t data_block_size,
+                uint64_t *ret_bytes) {
+
+        /* The calculation here is based on the documented on-disk format of the dm-verity
+         * https://docs.kernel.org/admin-guide/device-mapper/verity.html#hash-tree
+         *
+         * Upstream implementation:
+         * https://gitlab.com/cryptsetup/cryptsetup/-/blob/v2.7.5/lib/verity/verity_hash.c */
+
+        uint64_t data_blocks = DIV_ROUND_UP(data_bytes, data_block_size);
+        if (data_blocks > UINT64_MAX / data_block_size)
+                return -EOVERFLOW;
+
+        /* hashes that fit in one hash block (node in the merkle tree) */
+        uint64_t hashes_per_hash_block = hash_block_size / SHA256_DIGEST_SIZE;
+
+        /* initialize with 2 for the root of the merkle tree + the superblock */
+        uint64_t hash_blocks = 2;
+
+        /* iterate through the levels of the merkle tree bottom up */
+        uint64_t remaining_blocks = data_blocks;
+        while (remaining_blocks > hashes_per_hash_block) {
+                uint64_t hash_blocks_for_level;
+
+                /* number of hash blocks required to reference the underlying blocks */
+                hash_blocks_for_level = DIV_ROUND_UP(remaining_blocks, hashes_per_hash_block);
+                if (hash_blocks > UINT64_MAX - hash_blocks_for_level)
+                        return -EOVERFLOW;
+
+                /* add current layer to the total number of hash blocks */
+                hash_blocks += hash_blocks_for_level;
+
+                /* hashes on this level serve as the blocks on which the next level is built */
+                remaining_blocks = hash_blocks_for_level;
+        }
+
+        if (hash_blocks > UINT64_MAX / hash_block_size)
+                return -EOVERFLOW;
+
+        *ret_bytes = hash_blocks * hash_block_size;
+        return 0;
+}
+
 static Partition *partition_new(void) {
         Partition *p;
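
As a rough standalone sketch of the same arithmetic (assuming 4096-byte data and hash blocks and 32-byte SHA-256 digests; the helper name and the DIV_ROUND_UP macro are redefined here purely for the example and are not the systemd build):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SHA256_DIGEST_SIZE 32  /* digest size assumed for the verity hash tree */
    #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

    /* Mirrors the hunk above: start with superblock + root block, then add one
     * tree level at a time until a single hash block can reference the rest. */
    static uint64_t verity_hash_bytes(uint64_t data_bytes, uint64_t hash_bs, uint64_t data_bs) {
            uint64_t hashes_per_block = hash_bs / SHA256_DIGEST_SIZE;
            uint64_t remaining = DIV_ROUND_UP(data_bytes, data_bs);
            uint64_t hash_blocks = 2; /* superblock + root of the merkle tree */

            while (remaining > hashes_per_block) {
                    uint64_t level = DIV_ROUND_UP(remaining, hashes_per_block);
                    hash_blocks += level;
                    remaining = level;
            }

            return hash_blocks * hash_bs;
    }

    int main(void) {
            /* Example: SizeMaxBytes=1G with 4096-byte blocks -> 262144 data blocks,
             * 128 digests per hash block -> 2048 + 16 hash blocks + superblock + root. */
            printf("%" PRIu64 " bytes\n", verity_hash_bytes(1ULL << 30, 4096, 4096));
            return 0;
    }

For a 1 GiB data maximum this prints 8462336 bytes, i.e. 2066 hash blocks or roughly 8 MiB, which is the value the new context_update_verity_size() below would derive for the hash partition.
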
@@ -4352,11 +4400,6 @@ static int partition_format_verity_hash(
                 node = partition_target_path(t);
         }

-        if (p->verity_data_block_size == UINT64_MAX)
-                p->verity_data_block_size = context->fs_sector_size;
-        if (p->verity_hash_block_size == UINT64_MAX)
-                p->verity_hash_block_size = context->fs_sector_size;
-
         r = sym_crypt_init(&cd, node);
         if (r < 0)
                 return log_error_errno(r, "Failed to allocate libcryptsetup context for %s: %m", node);
@@ -6678,6 +6721,60 @@ static int context_crypttab(Context *context) {
         return 0;
 }

+/* update block sizes for verity siblings, calculate hash partition size if requested */
+static int context_update_verity_size(Context *context) {
+        int r;
+
+        assert(context);
+
+        LIST_FOREACH(partitions, p, context->partitions) {
+                Partition *dp;
+
+                if (p->verity != VERITY_HASH)
+                        continue;
+
+                if (p->dropped)
+                        continue;
+
+                if (PARTITION_EXISTS(p)) /* Never format existing partitions */
+                        continue;
+
+                assert_se(dp = p->siblings[VERITY_DATA]);
+
+                if (p->verity_data_block_size == UINT64_MAX)
+                        p->verity_data_block_size = context->fs_sector_size;
+                if (p->verity_hash_block_size == UINT64_MAX)
+                        p->verity_hash_block_size = context->fs_sector_size;
+
+                uint64_t sz;
+                if (dp->size_max != UINT64_MAX) {
+                        r = calculate_verity_hash_size(
+                                        dp->size_max,
+                                        p->verity_hash_block_size,
+                                        p->verity_data_block_size,
+                                        &sz);
+                        if (r < 0)
+                                return log_error_errno(r, "Failed to calculate size of dm-verity hash partition: %m");
+
+                        if (sz > p->size_min || sz > p->size_max)
+                                log_warning("The dm-verity hash partition %s may be too small for a data partition "
+                                            "with SizeMaxBytes=%s. The hash partition would require %s for a data "
+                                            "partition of specified max size. Consider increasing the size of the "
+                                            "hash partition, or decreasing SizeMaxBytes= of the data partition.",
+                                            p->definition_path, FORMAT_BYTES(dp->size_max), FORMAT_BYTES(sz));
+                        else if (p->size_min == UINT64_MAX) {
+                                log_debug("Derived size %s of verity hash partition %s from verity data partition %s.",
+                                          FORMAT_BYTES(sz), p->definition_path, dp->definition_path);
+
+                                p->size_min = sz;
+                        }
+                }
+        }
+
+        return 0;
+}
+
 static int context_minimize(Context *context) {
         const char *vt = NULL;
         int r;
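
To put numbers on the new helper: with the default 4096-byte block sizes, a data sibling carrying SizeMaxBytes=1G works out to 2066 hash blocks, i.e. roughly 8 MiB (see the sketch after the first hunk). If the hash partition's definition leaves SizeMinBytes= unset, that value is adopted as its size_min so the partition is allocated large enough up front; if the definition pins SizeMinBytes= or SizeMaxBytes= below it, the new log_warning() fires and the configured sizes are left as they are.
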
@@ -8309,6 +8406,10 @@ static int run(int argc, char *argv[]) {
         if (r < 0)
                 return r;

+        r = context_update_verity_size(context);
+        if (r < 0)
+                return r;
+
         r = context_minimize(context);
         if (r < 0)
                 return r;