1
0
mirror of git://sourceware.org/git/lvm2.git synced 2024-12-21 13:34:40 +03:00

activation: extend handling of pending_delete

With previous patch 30a98e4d67 we
started to put devices on the pending_delete list instead
of directly scheduling their removal.

However we have operations like 'snapshot merge' where we are
resuming the device tree in 2 subsequent activation calls - so
the 1st such call will still have suspended devices and no chance
to push the 'remove' ioctl.

Since we currently cannot easily solve this by doing just a single
activation call (which would be the preferred solution) - we introduce
a preservation of pending_delete via the command structure and
then restore it on the next activation call.

This way we can still remove devices later - although it might not be
the best moment - this may need further tuning.

Also we don't keep the list of operations in 1 transaction
(unless we do verify udev symlinks) - this could probably
also make it more correct in terms of which 'remove' can
be combined with an already running 'resume'.
This commit is contained in:
Zdenek Kabelac 2019-08-23 13:08:34 +02:00
parent a18f562913
commit 7833c45fbe
3 changed files with 20 additions and 13 deletions

View File

@ -3612,6 +3612,7 @@ static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, const
if (!dm_tree_deactivate_children(root, dl->str, strlen(dl->str)))
return_0;
}
dm_list_init(&dm->pending_delete);
}
return 1;
@ -3738,25 +3739,22 @@ out_no_root:
/*
 * Activate LV: run the ACTIVATE tree action, then CLEAN to remove
 * no-longer-needed devices.  Devices that cannot be removed in this
 * call (e.g. still suspended during a 2-phase operation such as
 * snapshot merge) are preserved on the command context so a later
 * activation call can retry their removal.
 *
 * Returns 1 on success, 0 on failure.
 */
int dev_manager_activate(struct dev_manager *dm, const struct logical_volume *lv,
struct lv_activate_opts *laopts)
{
/* Pick up devices whose removal was deferred by a previous
 * activation call (stashed on cmd->pending_delete) so this
 * call gets a chance to finally remove them. */
dm_list_splice(&dm->pending_delete, &lv->vg->cmd->pending_delete);
if (!_tree_action(dm, lv, laopts, ACTIVATE))
return_0;
/*
 * When lvm2 resumes a device and shortly after that it removes it,
 * udevd rule will try to blindly call 'dmsetup info' on already removed
 * device leaving the trace inside syslog about failing operation.
 *
 * TODO: It's not completely clear this call here is the best fix.
 * Maybe there can be a better sequence, but ATM we do usually resume
 * error device i.e. on cache deletion and remove it.
 * TODO2: there could be more similar cases!
 */
if (!dm_list_empty(&dm->pending_delete))
fs_unlock();
if (!_tree_action(dm, lv, laopts, CLEAN))
return_0;
/* Any devices still pending could not be removed in this call;
 * duplicate the list into the long-lived command mempool so the
 * next activation call can restore and retry it (via the splice
 * at the top of this function). */
if (!dm_list_empty(&dm->pending_delete)) {
log_debug("Preserving %d device(s) for removal while being suspended.",
dm_list_size(&dm->pending_delete));
if (!(str_list_dup(lv->vg->cmd->mem, &lv->vg->cmd->pending_delete,
&dm->pending_delete)))
return_0;
}
return 1;
}

View File

@ -1734,6 +1734,8 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
cmd->current_settings = cmd->default_settings;
cmd->initialized.config = 1;
dm_list_init(&cmd->pending_delete);
out:
if (!cmd->initialized.config) {
destroy_toolcontext(cmd);
@ -1922,6 +1924,12 @@ int refresh_toolcontext(struct cmd_context *cmd)
cmd->initialized.config = 1;
if (!dm_list_empty(&cmd->pending_delete)) {
log_debug(INTERNAL_ERROR "Unprocessed pending delete for %d devices.",
dm_list_size(&cmd->pending_delete));
dm_list_init(&cmd->pending_delete);
}
if (cmd->initialized.connections && !init_connections(cmd))
return_0;

View File

@ -239,6 +239,7 @@ struct cmd_context {
const char *report_list_item_separator;
const char *time_format;
unsigned rand_seed;
struct dm_list pending_delete; /* list of LVs for removal */
};
/*