diff --git a/man/repart.d.xml b/man/repart.d.xml
index 3ffb6aba026..b01c0c04f00 100644
--- a/man/repart.d.xml
+++ b/man/repart.d.xml
@@ -76,16 +76,7 @@
Type=
The GPT partition type UUID to match. This may be a GPT partition type UUID such as
- 4f68bce3-e8cd-4db1-96e7-fbcaf984b709, or an identifier.
- Architecture specific partition types can use one of these architecture identifiers:
- alpha, arc, arm (32-bit),
- arm64 (64-bit, aka aarch64), ia64,
- loongarch64, mips-le, mips64-le,
- parisc, ppc, ppc64,
- ppc64-le, riscv32, riscv64,
- s390, s390x, tilegx,
- x86 (32-bit, aka i386) and x86-64 (64-bit, aka amd64).
-
+ 4f68bce3-e8cd-4db1-96e7-fbcaf984b709, or an identifier.
The supported identifiers are:
@@ -237,7 +228,14 @@
- This setting defaults to linux-generic.
+ Architecture specific partition types can use one of these architecture identifiers:
+ alpha, arc, arm (32-bit),
+ arm64 (64-bit, aka aarch64), ia64,
+ loongarch64, mips-le, mips64-le,
+ parisc, ppc, ppc64,
+ ppc64-le, riscv32, riscv64,
+ s390, s390x, tilegx,
+ x86 (32-bit, aka i386) and x86-64 (64-bit, aka amd64).
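As an illustrative sketch (not part of the patch): the architecture identifiers above combine with the generic type names, and the UUID quoted earlier is the Discoverable Partitions Specification type for a root partition on x86-64, so the following two declarations should select the same partition type:

    [Partition]
    Type=4f68bce3-e8cd-4db1-96e7-fbcaf984b709

    [Partition]
    Type=root-x86-64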
Most of the partition type UUIDs listed above are defined in the Discoverable Partitions
@@ -897,6 +895,59 @@
+
+
+ SupplementFor=
+
+ Takes a partition definition name, such as 10-esp. If specified,
+ systemd-repart will avoid creating this partition and instead prefer to partially
+ merge the two definitions. However, depending on the existing layout of partitions on disk,
+ systemd-repart may be forced to fall back to un-merging the definitions and
+ using them as originally written, potentially creating this partition. Specifically,
+ systemd-repart will fall back if this partition is found to already exist on disk,
+ or if the target partition already exists on disk but is too small, or if it cannot allocate space
+ for the merged partition for some other reason.
+
+ The following fields are merged into the target definition in the specified ways:
+ Weight= and PaddingWeight= are overwritten with the supplement's values;
+ SizeMinBytes= and PaddingMinBytes= use the larger of the two
+ values; SizeMaxBytes= and PaddingMaxBytes= use the smaller
+ value; and CopyFiles=, ExcludeFiles=,
+ ExcludeFilesTarget=, MakeDirectories=, and
+ Subvolumes= are concatenated.
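To make the merge rules concrete, consider a hypothetical pair of definitions (file names and values invented for illustration), where 60-extra.conf declares itself a supplement of 50-base.conf:

    # 50-base.conf
    [Partition]
    Type=linux-generic
    Weight=500
    SizeMinBytes=1G
    SizeMaxBytes=8G
    MakeDirectories=/srv

    # 60-extra.conf
    [Partition]
    Type=linux-generic
    SupplementFor=50-base
    Weight=2000
    SizeMinBytes=2G
    SizeMaxBytes=4G
    MakeDirectories=/data

While the two are merged, the base definition behaves as if it specified Weight=2000, SizeMinBytes=2G, SizeMaxBytes=4G and both MakeDirectories= entries; if systemd-repart has to un-merge them, each file applies exactly as written.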
+
+ Usage of this option in combination with CopyBlocks=,
+ Encrypt=, or Verity= is not supported. The target definition
+ cannot use these settings either. A definition cannot simultaneously be a supplement and act as a
+ target for some other supplement definition. A target cannot have more than one supplement partition
+ associated with it.
+
+ For example, distributions can use this to implement $BOOT as defined in
+ the Boot Loader
+ Specification. Distributions may prefer to use the ESP as $BOOT whenever
+ possible, but to adhere to the spec, XBOOTLDR must sometimes be used instead. So, they should create
+ two definitions: the first defining an ESP big enough to hold just the bootloader, and a second for
+ the XBOOTLDR that's sufficiently large to hold kernels and configured as a supplement for the ESP.
+ Whenever possible, systemd-repart will try to merge the two definitions to create
+ one large ESP, but if that's not possible given the existing conditions on disk, a small ESP and a
+ large XBOOTLDR will be created instead.
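A minimal sketch of such a pair of definitions (names, sizes and Format= choices are illustrative, not mandated by the specification):

    # 10-esp.conf
    [Partition]
    Type=esp
    Format=vfat
    SizeMinBytes=128M

    # 20-xbootldr.conf
    [Partition]
    Type=xbootldr
    Format=vfat
    SupplementFor=10-esp
    SizeMinBytes=512M

When the definitions can be merged, the result is a single ESP of at least 512M; otherwise an ESP of at least 128M plus a separate XBOOTLDR of at least 512M are created.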
+
+ As another example, distributions can also use this to seamlessly share a single
+ /home partition in a multi-boot scenario, while preferring to keep
+ /home on the root partition by default. Having a /home
+ partition separated from the root partition entails some extra complexity: someone has to decide how
+ to split the space between the two partitions. On the other hand, it allows a user to share their
+ home area between multiple installed OSs (i.e. via
+ systemd-homed.service(8)). Distributions should create two definitions:
+ the first for a root partition that takes up some relatively small percentage of the disk, and the
+ second as a supplement for the first to create a /home partition that takes up
+ all the remaining free space. On first boot, if systemd-repart finds an existing
+ /home partition on disk, it'll un-merge the definitions and create just a small
+ root partition. Otherwise, the definitions will be merged and a single large root partition will be
+ created.
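A sketch of this setup, again with invented names and values:

    # 50-root.conf
    [Partition]
    Type=root
    Format=ext4
    CopyFiles=/
    Weight=100

    # 60-home.conf
    [Partition]
    Type=home
    Format=ext4
    SupplementFor=50-root
    Weight=10000

With just these two definitions the weights only matter in the un-merged case: the root partition then stays comparatively small while /home absorbs most of the remaining free space; in the merged case the single root partition gets all of it.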
+
+
+
diff --git a/src/partition/repart.c b/src/partition/repart.c
index e1608bd64dc..baef519eaf3 100644
--- a/src/partition/repart.c
+++ b/src/partition/repart.c
@@ -404,6 +404,10 @@ typedef struct Partition {
PartitionEncryptedVolume *encrypted_volume;
+ char *supplement_for_name;
+ struct Partition *supplement_for, *supplement_target_for;
+ struct Partition *suppressing;
+
struct Partition *siblings[_VERITY_MODE_MAX];
LIST_FIELDS(struct Partition, partitions);
@@ -411,6 +415,7 @@ typedef struct Partition {
#define PARTITION_IS_FOREIGN(p) (!(p)->definition_path)
#define PARTITION_EXISTS(p) (!!(p)->current_partition)
+#define PARTITION_SUPPRESSED(p) ((p)->supplement_for && (p)->supplement_for->suppressing == (p))
struct FreeArea {
Partition *after;
@@ -520,6 +525,28 @@ static Partition *partition_new(void) {
return p;
}
+static void partition_unlink_supplement(Partition *p) {
+ assert(p);
+
+ assert(!p->supplement_for || !p->supplement_target_for); /* Can't be both */
+
+ if (p->supplement_target_for) {
+ assert(p->supplement_target_for->supplement_for == p);
+
+ p->supplement_target_for->supplement_for = NULL;
+ }
+
+ if (p->supplement_for) {
+ assert(p->supplement_for->supplement_target_for == p);
+ assert(!p->supplement_for->suppressing || p->supplement_for->suppressing == p);
+
+ p->supplement_for->supplement_target_for = p->supplement_for->suppressing = NULL;
+ }
+
+ p->supplement_for_name = mfree(p->supplement_for_name);
+ p->supplement_target_for = p->supplement_for = p->suppressing = NULL;
+}
+
static Partition* partition_free(Partition *p) {
if (!p)
return NULL;
@@ -563,6 +590,8 @@ static Partition* partition_free(Partition *p) {
partition_encrypted_volume_free(p->encrypted_volume);
+ partition_unlink_supplement(p);
+
return mfree(p);
}
@@ -608,6 +637,8 @@ static void partition_foreignize(Partition *p) {
p->n_mountpoints = 0;
p->encrypted_volume = partition_encrypted_volume_free(p->encrypted_volume);
+
+ partition_unlink_supplement(p);
}
static bool partition_type_exclude(const GptPartitionType *type) {
@@ -740,6 +771,10 @@ static void partition_drop_or_foreignize(Partition *p) {
p->dropped = true;
p->allocated_to_area = NULL;
+
+ /* If a supplement partition is dropped, we don't want to merge in its settings. */
+ if (PARTITION_SUPPRESSED(p))
+ p->supplement_for->suppressing = NULL;
}
}
@@ -775,7 +810,7 @@ static bool context_drop_or_foreignize_one_priority(Context *context) {
}
static uint64_t partition_min_size(const Context *context, const Partition *p) {
- uint64_t sz;
+ uint64_t sz, override_min;
assert(context);
assert(p);
@@ -817,11 +852,13 @@ static uint64_t partition_min_size(const Context *context, const Partition *p) {
sz = d;
}
- return MAX(round_up_size(p->size_min != UINT64_MAX ? p->size_min : DEFAULT_MIN_SIZE, context->grain_size), sz);
+ override_min = p->suppressing ? MAX(p->size_min, p->suppressing->size_min) : p->size_min;
+
+ return MAX(round_up_size(override_min != UINT64_MAX ? override_min : DEFAULT_MIN_SIZE, context->grain_size), sz);
}
static uint64_t partition_max_size(const Context *context, const Partition *p) {
- uint64_t sm;
+ uint64_t sm, override_max;
/* Calculate how large the partition may become at max. This is generally the configured maximum
* size, except when it already exists and is larger than that. In that case it's the existing size,
@@ -839,10 +876,11 @@ static uint64_t partition_max_size(const Context *context, const Partition *p) {
if (p->verity == VERITY_SIG)
return VERITY_SIG_SIZE;
- if (p->size_max == UINT64_MAX)
+ override_max = p->suppressing ? MIN(p->size_max, p->suppressing->size_max) : p->size_max;
+ if (override_max == UINT64_MAX)
return UINT64_MAX;
- sm = round_down_size(p->size_max, context->grain_size);
+ sm = round_down_size(override_max, context->grain_size);
if (p->current_size != UINT64_MAX)
sm = MAX(p->current_size, sm);
@@ -851,13 +889,17 @@ static uint64_t partition_max_size(const Context *context, const Partition *p) {
}
static uint64_t partition_min_padding(const Partition *p) {
+ uint64_t override_min;
+
assert(p);
- return p->padding_min != UINT64_MAX ? p->padding_min : 0;
+
+ override_min = p->suppressing ? MAX(p->padding_min, p->suppressing->padding_min) : p->padding_min;
+ return override_min != UINT64_MAX ? override_min : 0;
}
static uint64_t partition_max_padding(const Partition *p) {
assert(p);
- return p->padding_max;
+ return p->suppressing ? MIN(p->padding_max, p->suppressing->padding_max) : p->padding_max;
}
static uint64_t partition_min_size_with_padding(Context *context, const Partition *p) {
@@ -977,7 +1019,7 @@ static bool context_allocate_partitions(Context *context, uint64_t *ret_largest_
uint64_t required;
FreeArea *a = NULL;
- if (p->dropped || PARTITION_IS_FOREIGN(p))
+ if (p->dropped || PARTITION_IS_FOREIGN(p) || PARTITION_SUPPRESSED(p))
continue;
/* How much do we need to fit? */
@@ -1015,6 +1057,57 @@ static bool context_allocate_partitions(Context *context, uint64_t *ret_largest_
return true;
}
+static bool context_unmerge_and_allocate_partitions(Context *context) {
+ assert(context);
+
+ /* This should only be called after plain context_allocate_partitions fails. This algorithm will
+ * try, in the order that minimizes the number of created supplement partitions, all combinations of
+ * un-suppressing supplement partitions until it finds one that works. */
+
+ /* First, let's try to un-suppress just one supplement partition and see if that gets us anywhere */
+ LIST_FOREACH(partitions, p, context->partitions) {
+ Partition *unsuppressed;
+
+ if (!p->suppressing)
+ continue;
+
+ unsuppressed = TAKE_PTR(p->suppressing);
+
+ if (context_allocate_partitions(context, NULL))
+ return true;
+
+ p->suppressing = unsuppressed;
+ }
+
+ /* Looks like not. So we have to un-suppress at least two partitions. We can do this recursively */
+ LIST_FOREACH(partitions, p, context->partitions) {
+ Partition *unsuppressed;
+
+ if (!p->suppressing)
+ continue;
+
+ unsuppressed = TAKE_PTR(p->suppressing);
+
+ if (context_unmerge_and_allocate_partitions(context))
+ return true;
+
+ p->suppressing = unsuppressed;
+ }
+
+ /* No combination of un-suppressed supplements made it possible to fit the partitions */
+ return false;
+}
+
+static uint32_t partition_weight(const Partition *p) {
+ assert(p);
+ return p->suppressing ? p->suppressing->weight : p->weight;
+}
+
+static uint32_t partition_padding_weight(const Partition *p) {
+ assert(p);
+ return p->suppressing ? p->suppressing->padding_weight : p->padding_weight;
+}
+
static int context_sum_weights(Context *context, FreeArea *a, uint64_t *ret) {
uint64_t weight_sum = 0;
@@ -1028,13 +1121,11 @@ static int context_sum_weights(Context *context, FreeArea *a, uint64_t *ret) {
if (p->padding_area != a && p->allocated_to_area != a)
continue;
- if (p->weight > UINT64_MAX - weight_sum)
+ if (!INC_SAFE(&weight_sum, partition_weight(p)))
goto overflow_sum;
- weight_sum += p->weight;
- if (p->padding_weight > UINT64_MAX - weight_sum)
+ if (!INC_SAFE(&weight_sum, partition_padding_weight(p)))
goto overflow_sum;
- weight_sum += p->padding_weight;
}
*ret = weight_sum;
@@ -1099,7 +1190,6 @@ static bool context_grow_partitions_phase(
* get any additional room from the left-overs. Similar, if two partitions have the same weight they
* should get the same space if possible, even if one has a smaller minimum size than the other. */
LIST_FOREACH(partitions, p, context->partitions) {
-
/* Look only at partitions associated with this free area, i.e. immediately
* preceding it, or allocated into it */
if (p->allocated_to_area != a && p->padding_area != a)
@@ -1107,11 +1197,14 @@ static bool context_grow_partitions_phase(
if (p->new_size == UINT64_MAX) {
uint64_t share, rsz, xsz;
+ uint32_t weight;
bool charge = false;
+ weight = partition_weight(p);
+
/* Calculate how much of this space this partition needs if everyone got
* the weight-based share */
- share = scale_by_weight(*span, p->weight, *weight_sum);
+ share = scale_by_weight(*span, weight, *weight_sum);
rsz = partition_min_size(context, p);
xsz = partition_max_size(context, p);
@@ -1151,15 +1244,18 @@ static bool context_grow_partitions_phase(
if (charge) {
*span = charge_size(context, *span, p->new_size);
- *weight_sum = charge_weight(*weight_sum, p->weight);
+ *weight_sum = charge_weight(*weight_sum, weight);
}
}
if (p->new_padding == UINT64_MAX) {
uint64_t share, rsz, xsz;
+ uint32_t padding_weight;
bool charge = false;
- share = scale_by_weight(*span, p->padding_weight, *weight_sum);
+ padding_weight = partition_padding_weight(p);
+
+ share = scale_by_weight(*span, padding_weight, *weight_sum);
rsz = partition_min_padding(p);
xsz = partition_max_padding(p);
@@ -1178,7 +1274,7 @@ static bool context_grow_partitions_phase(
if (charge) {
*span = charge_size(context, *span, p->new_padding);
- *weight_sum = charge_weight(*weight_sum, p->padding_weight);
+ *weight_sum = charge_weight(*weight_sum, padding_weight);
}
}
}
@@ -2163,7 +2259,9 @@ static int partition_finalize_fstype(Partition *p, const char *path) {
static bool partition_needs_populate(const Partition *p) {
assert(p);
- return !strv_isempty(p->copy_files) || !strv_isempty(p->make_directories) || !strv_isempty(p->make_symlinks);
+ assert(!p->supplement_for || !p->suppressing); /* Avoid infinite recursion */
+ return !strv_isempty(p->copy_files) || !strv_isempty(p->make_directories) || !strv_isempty(p->make_symlinks) ||
+ (p->suppressing && partition_needs_populate(p->suppressing));
}
static int partition_read_definition(Partition *p, const char *path, const char *const *conf_file_dirs) {
@@ -2204,6 +2302,7 @@ static int partition_read_definition(Partition *p, const char *path, const char
{ "Partition", "EncryptedVolume", config_parse_encrypted_volume, 0, p },
{ "Partition", "Compression", config_parse_string, CONFIG_PARSE_STRING_SAFE_AND_ASCII, &p->compression },
{ "Partition", "CompressionLevel", config_parse_string, CONFIG_PARSE_STRING_SAFE_AND_ASCII, &p->compression_level },
+ { "Partition", "SupplementFor", config_parse_string, 0, &p->supplement_for_name },
{}
};
_cleanup_free_ char *filename = NULL;
@@ -2328,6 +2427,18 @@ static int partition_read_definition(Partition *p, const char *path, const char
return log_syntax(NULL, LOG_ERR, path, 1, SYNTHETIC_ERRNO(EINVAL),
"DefaultSubvolume= must be one of the paths in Subvolumes=.");
+ if (p->supplement_for_name) {
+ if (!filename_part_is_valid(p->supplement_for_name))
+ return log_syntax(NULL, LOG_ERR, path, 1, SYNTHETIC_ERRNO(EINVAL),
+ "SupplementFor= is an invalid filename: %s",
+ p->supplement_for_name);
+
+ if (p->copy_blocks_path || p->copy_blocks_auto || p->encrypt != ENCRYPT_OFF ||
+ p->verity != VERITY_OFF)
+ return log_syntax(NULL, LOG_ERR, path, 1, SYNTHETIC_ERRNO(EINVAL),
+ "SupplementFor= cannot be combined with CopyBlocks=/Encrypt=/Verity=");
+ }
+
/* Verity partitions are read only, let's imply the RO flag hence, unless explicitly configured otherwise. */
if ((IN_SET(p->type.designator,
PARTITION_ROOT_VERITY,
@@ -2634,6 +2745,58 @@ static int context_copy_from(Context *context) {
return 0;
}
+static bool check_cross_def_ranges_valid(uint64_t a_min, uint64_t a_max, uint64_t b_min, uint64_t b_max) {
+ if (a_min == UINT64_MAX && b_min == UINT64_MAX)
+ return true;
+
+ if (a_max == UINT64_MAX && b_max == UINT64_MAX)
+ return true;
+
+ return MAX(a_min != UINT64_MAX ? a_min : 0, b_min != UINT64_MAX ? b_min : 0) <= MIN(a_max, b_max);
+}
+
+static int supplement_find_target(const Context *context, const Partition *supplement, Partition **ret) {
+ int r;
+
+ assert(context);
+ assert(supplement);
+ assert(ret);
+
+ LIST_FOREACH(partitions, p, context->partitions) {
+ _cleanup_free_ char *filename = NULL;
+
+ if (p == supplement)
+ continue;
+
+ r = path_extract_filename(p->definition_path, &filename);
+ if (r < 0)
+ return log_error_errno(r,
+ "Failed to extract filename from path '%s': %m",
+ p->definition_path);
+
+ *ASSERT_PTR(endswith(filename, ".conf")) = 0; /* Remove the file extension */
+
+ if (!streq(supplement->supplement_for_name, filename))
+ continue;
+
+ if (p->supplement_for_name)
+ return log_syntax(NULL, LOG_ERR, supplement->definition_path, 1, SYNTHETIC_ERRNO(EINVAL),
+ "SupplementFor= target is itself configured as a supplement.");
+
+ if (p->suppressing)
+ return log_syntax(NULL, LOG_ERR, supplement->definition_path, 1, SYNTHETIC_ERRNO(EINVAL),
+ "SupplementFor= target already has a supplement defined: %s",
+ p->suppressing->definition_path);
+
+ *ret = p;
+ return 0;
+ }
+
+ return log_syntax(NULL, LOG_ERR, supplement->definition_path, 1, SYNTHETIC_ERRNO(EINVAL),
+ "Couldn't find target partition for SupplementFor=%s",
+ supplement->supplement_for_name);
+}
+
static int context_read_definitions(Context *context) {
_cleanup_strv_free_ char **files = NULL;
Partition *last = LIST_FIND_TAIL(partitions, context->partitions);
@@ -2725,7 +2888,33 @@ static int context_read_definitions(Context *context) {
if (dp->minimize == MINIMIZE_OFF && !(dp->copy_blocks_path || dp->copy_blocks_auto))
return log_syntax(NULL, LOG_ERR, p->definition_path, 1, SYNTHETIC_ERRNO(EINVAL),
"Minimize= set for verity hash partition but data partition does not set CopyBlocks= or Minimize=.");
+ }
+ LIST_FOREACH(partitions, p, context->partitions) {
+ Partition *tgt = NULL;
+
+ if (!p->supplement_for_name)
+ continue;
+
+ r = supplement_find_target(context, p, &tgt);
+ if (r < 0)
+ return r;
+
+ if (tgt->copy_blocks_path || tgt->copy_blocks_auto || tgt->encrypt != ENCRYPT_OFF ||
+ tgt->verity != VERITY_OFF)
+ return log_syntax(NULL, LOG_ERR, p->definition_path, 1, SYNTHETIC_ERRNO(EINVAL),
+ "SupplementFor= target uses CopyBlocks=/Encrypt=/Verity=");
+
+ if (!check_cross_def_ranges_valid(p->size_min, p->size_max, tgt->size_min, tgt->size_max))
+ return log_syntax(NULL, LOG_ERR, p->definition_path, 1, SYNTHETIC_ERRNO(EINVAL),
+ "SizeMinBytes= larger than SizeMaxBytes= when merged with SupplementFor= target.");
+
+ if (!check_cross_def_ranges_valid(p->padding_min, p->padding_max, tgt->padding_min, tgt->padding_max))
+ return log_syntax(NULL, LOG_ERR, p->definition_path, 1, SYNTHETIC_ERRNO(EINVAL),
+ "PaddingMinBytes= larger than PaddingMaxBytes= when merged with SupplementFor= target.");
+
+ p->supplement_for = tgt;
+ tgt->suppressing = tgt->supplement_target_for = p;
}
return 0;
@@ -3109,6 +3298,10 @@ static int context_load_partition_table(Context *context) {
}
}
+ LIST_FOREACH(partitions, p, context->partitions)
+ if (PARTITION_SUPPRESSED(p) && PARTITION_EXISTS(p))
+ p->supplement_for->suppressing = NULL;
+
add_initial_free_area:
nsectors = fdisk_get_nsectors(c);
assert(nsectors <= UINT64_MAX/secsz);
@@ -3200,6 +3393,11 @@ static void context_unload_partition_table(Context *context) {
p->current_uuid = SD_ID128_NULL;
p->current_label = mfree(p->current_label);
+
+ /* A supplement partition is only ever un-suppressed if the existing partition table prevented
+ * us from suppressing it. So when unloading the partition table, we must re-suppress. */
+ if (p->supplement_for)
+ p->supplement_for->suppressing = p;
}
context->start = UINT64_MAX;
@@ -4977,6 +5175,31 @@ static int add_exclude_path(const char *path, Hashmap **denylist, DenyType type)
return 0;
}
+static int shallow_join_strv(char ***ret, char **a, char **b) {
+ _cleanup_free_ char **joined = NULL;
+ char **iter;
+
+ assert(ret);
+
+ joined = new(char*, strv_length(a) + strv_length(b) + 1);
+ if (!joined)
+ return log_oom();
+
+ iter = joined;
+
+ STRV_FOREACH(i, a)
+ *(iter++) = *i;
+
+ *iter = NULL; /* terminate before deduplicating, so strv_contains() never reads uninitialized entries */
+
+ STRV_FOREACH(i, b)
+ if (!strv_contains(joined, *i)) {
+ *(iter++) = *i;
+ *iter = NULL; /* keep the array terminated as we append */
+ }
+
+ *ret = TAKE_PTR(joined);
+ return 0;
+}
+
static int make_copy_files_denylist(
Context *context,
const Partition *p,
@@ -4985,6 +5208,7 @@ static int make_copy_files_denylist(
Hashmap **ret) {
_cleanup_hashmap_free_ Hashmap *denylist = NULL;
+ _cleanup_free_ char **override_exclude_src = NULL, **override_exclude_tgt = NULL;
int r;
assert(context);
@@ -5004,13 +5228,26 @@ static int make_copy_files_denylist(
/* Add the user configured excludes. */
- STRV_FOREACH(e, p->exclude_files_source) {
+ if (p->suppressing) {
+ r = shallow_join_strv(&override_exclude_src,
+ p->exclude_files_source,
+ p->suppressing->exclude_files_source);
+ if (r < 0)
+ return r;
+ r = shallow_join_strv(&override_exclude_tgt,
+ p->exclude_files_target,
+ p->suppressing->exclude_files_target);
+ if (r < 0)
+ return r;
+ }
+
+ STRV_FOREACH(e, override_exclude_src ?: p->exclude_files_source) {
r = add_exclude_path(*e, &denylist, endswith(*e, "/") ? DENY_CONTENTS : DENY_INODE);
if (r < 0)
return r;
}
- STRV_FOREACH(e, p->exclude_files_target) {
+ STRV_FOREACH(e, override_exclude_tgt ?: p->exclude_files_target) {
_cleanup_free_ char *path = NULL;
const char *s = path_startswith(*e, target);
@@ -5104,6 +5341,7 @@ static int add_subvolume_path(const char *path, Set **subvolumes) {
static int make_subvolumes_strv(const Partition *p, char ***ret) {
_cleanup_strv_free_ char **subvolumes = NULL;
Subvolume *subvolume;
+ int r;
assert(p);
assert(ret);
@@ -5112,6 +5350,18 @@ static int make_subvolumes_strv(const Partition *p, char ***ret) {
if (strv_extend(&subvolumes, subvolume->path) < 0)
return log_oom();
+ if (p->suppressing) {
+ _cleanup_strv_free_ char **suppressing = NULL;
+
+ r = make_subvolumes_strv(p->suppressing, &suppressing);
+ if (r < 0)
+ return r;
+
+ r = strv_extend_strv(&subvolumes, suppressing, /* filter_duplicates= */ true);
+ if (r < 0)
+ return log_oom();
+ }
+
*ret = TAKE_PTR(subvolumes);
return 0;
}
@@ -5122,18 +5372,22 @@ static int make_subvolumes_set(
const char *target,
Set **ret) {
+ _cleanup_strv_free_ char **paths = NULL;
_cleanup_set_free_ Set *subvolumes = NULL;
- Subvolume *subvolume;
int r;
assert(p);
assert(target);
assert(ret);
- ORDERED_HASHMAP_FOREACH(subvolume, p->subvolumes) {
+ r = make_subvolumes_strv(p, &paths);
+ if (r < 0)
+ return r;
+
+ STRV_FOREACH(subvolume, paths) {
_cleanup_free_ char *path = NULL;
- const char *s = path_startswith(subvolume->path, target);
+ const char *s = path_startswith(*subvolume, target);
if (!s)
continue;
@@ -5176,6 +5430,7 @@ static usec_t epoch_or_infinity(void) {
static int do_copy_files(Context *context, Partition *p, const char *root) {
_cleanup_strv_free_ char **subvolumes = NULL;
+ _cleanup_free_ char **override_copy_files = NULL;
int r;
assert(p);
@@ -5185,11 +5440,17 @@ static int do_copy_files(Context *context, Partition *p, const char *root) {
if (r < 0)
return r;
+ if (p->suppressing) {
+ r = shallow_join_strv(&override_copy_files, p->copy_files, p->suppressing->copy_files);
+ if (r < 0)
+ return r;
+ }
+
/* copy_tree_at() automatically copies the permissions of source directories to target directories if
* it created them. However, the root directory is created by us, so we have to manually take care
* that it is initialized. We use the first source directory targeting "/" as the metadata source for
* the root directory. */
- STRV_FOREACH_PAIR(source, target, p->copy_files) {
+ STRV_FOREACH_PAIR(source, target, override_copy_files ?: p->copy_files) {
_cleanup_close_ int rfd = -EBADF, sfd = -EBADF;
if (!path_equal(*target, "/"))
@@ -5210,7 +5471,7 @@ static int do_copy_files(Context *context, Partition *p, const char *root) {
break;
}
- STRV_FOREACH_PAIR(source, target, p->copy_files) {
+ STRV_FOREACH_PAIR(source, target, override_copy_files ?: p->copy_files) {
_cleanup_hashmap_free_ Hashmap *denylist = NULL;
_cleanup_set_free_ Set *subvolumes_by_source_inode = NULL;
_cleanup_close_ int sfd = -EBADF, pfd = -EBADF, tfd = -EBADF;
@@ -5328,6 +5589,7 @@ static int do_copy_files(Context *context, Partition *p, const char *root) {
static int do_make_directories(Partition *p, const char *root) {
_cleanup_strv_free_ char **subvolumes = NULL;
+ _cleanup_free_ char **override_dirs = NULL;
int r;
assert(p);
@@ -5337,7 +5599,13 @@ static int do_make_directories(Partition *p, const char *root) {
if (r < 0)
return r;
- STRV_FOREACH(d, p->make_directories) {
+ if (p->suppressing) {
+ r = shallow_join_strv(&override_dirs, p->make_directories, p->suppressing->make_directories);
+ if (r < 0)
+ return r;
+ }
+
+ STRV_FOREACH(d, override_dirs ?: p->make_directories) {
r = mkdir_p_root_full(root, *d, UID_INVALID, GID_INVALID, 0755, epoch_or_infinity(), subvolumes);
if (r < 0)
return log_error_errno(r, "Failed to create directory '%s' in file system: %m", *d);
@@ -5385,6 +5653,12 @@ static int make_subvolumes_read_only(Partition *p, const char *root) {
return log_error_errno(r, "Failed to make subvolume '%s' read-only: %m", subvolume->path);
}
+ if (p->suppressing) {
+ r = make_subvolumes_read_only(p->suppressing, root);
+ if (r < 0)
+ return r;
+ }
+
return 0;
}
@@ -5504,6 +5778,38 @@ static int partition_populate_filesystem(Context *context, Partition *p, const c
return 0;
}
+static int append_btrfs_subvols(char ***l, OrderedHashmap *subvolumes, const char *default_subvolume) {
+ Subvolume *subvolume;
+ int r;
+
+ assert(l);
+
+ ORDERED_HASHMAP_FOREACH(subvolume, subvolumes) {
+ _cleanup_free_ char *s = NULL, *f = NULL;
+
+ s = strdup(subvolume->path);
+ if (!s)
+ return log_oom();
+
+ f = subvolume_flags_to_string(subvolume->flags);
+ if (!f)
+ return log_oom();
+
+ if (streq_ptr(subvolume->path, default_subvolume) &&
+ !strextend_with_separator(&f, ",", "default"))
+ return log_oom();
+
+ if (!isempty(f) && !strextend_with_separator(&s, ":", f))
+ return log_oom();
+
+ r = strv_extend_many(l, "--subvol", s);
+ if (r < 0)
+ return log_oom();
+ }
+
+ return 0;
+}
+
static int finalize_extra_mkfs_options(const Partition *p, const char *root, char ***ret) {
_cleanup_strv_free_ char **sv = NULL;
int r;
@@ -5518,28 +5824,14 @@ static int finalize_extra_mkfs_options(const Partition *p, const char *root, cha
p->format);
if (partition_needs_populate(p) && root && streq(p->format, "btrfs")) {
- Subvolume *subvolume;
+ r = append_btrfs_subvols(&sv, p->subvolumes, p->default_subvolume);
+ if (r < 0)
+ return r;
- ORDERED_HASHMAP_FOREACH(subvolume, p->subvolumes) {
- _cleanup_free_ char *s = NULL, *f = NULL;
-
- s = strdup(subvolume->path);
- if (!s)
- return log_oom();
-
- f = subvolume_flags_to_string(subvolume->flags);
- if (!f)
- return log_oom();
-
- if (streq_ptr(subvolume->path, p->default_subvolume) && !strextend_with_separator(&f, ",", "default"))
- return log_oom();
-
- if (!isempty(f) && !strextend_with_separator(&s, ":", f))
- return log_oom();
-
- r = strv_extend_many(&sv, "--subvol", s);
+ if (p->suppressing) {
+ r = append_btrfs_subvols(&sv, p->suppressing->subvolumes, NULL);
if (r < 0)
- return log_oom();
+ return r;
}
}
@@ -8532,7 +8824,7 @@ static int determine_auto_size(Context *c) {
LIST_FOREACH(partitions, p, c->partitions) {
uint64_t m;
- if (p->dropped)
+ if (p->dropped || PARTITION_SUPPRESSED(p))
continue;
m = partition_min_size_with_padding(c, p);
@@ -8764,13 +9056,36 @@ static int run(int argc, char *argv[]) {
if (context_allocate_partitions(context, &largest_free_area))
break; /* Success! */
- if (!context_drop_or_foreignize_one_priority(context)) {
- r = log_error_errno(SYNTHETIC_ERRNO(ENOSPC),
- "Can't fit requested partitions into available free space (%s), refusing.",
- FORMAT_BYTES(largest_free_area));
- determine_auto_size(context);
- return r;
- }
+ if (context_unmerge_and_allocate_partitions(context))
+ break; /* We had to un-suppress a supplement or few, but still success! */
+
+ if (context_drop_or_foreignize_one_priority(context))
+ continue; /* Still no luck. Let's drop a priority and try again. */
+
+ /* No more priorities left to drop. This configuration just doesn't fit on this disk... */
+ r = log_error_errno(SYNTHETIC_ERRNO(ENOSPC),
+ "Can't fit requested partitions into available free space (%s), refusing.",
+ FORMAT_BYTES(largest_free_area));
+ determine_auto_size(context);
+ return r;
+ }
+
+ LIST_FOREACH(partitions, p, context->partitions) {
+ if (!p->supplement_for)
+ continue;
+
+ if (PARTITION_SUPPRESSED(p)) {
+ assert(!p->allocated_to_area);
+ p->dropped = true;
+
+ log_debug("Partition %s can be merged into %s, suppressing supplement.",
+ p->definition_path, p->supplement_for->definition_path);
+ } else if (PARTITION_EXISTS(p))
+ log_info("Partition %s already exists on disk, using supplement verbatim.",
+ p->definition_path);
+ else
+ log_info("Couldn't allocate partitions with %s merged into %s, using supplement verbatim.",
+ p->definition_path, p->supplement_for->definition_path);
}
/* Now assign free space according to the weight logic */