
lvmlockd: avoid lockd_vg for local VGs

Previously, a command would call lockd_vg() for a local VG,
which would go to lvmlockd, which would send back ENOLS,
and the command would not care when it saw the VG was local.
The pointless back-and-forth to lvmlockd for local VGs can
be avoided by checking the VG lock_type in lvmcache (which
label_scan now saves there; this wasn't the case back when
the original lockd_vg logic was added.)  If the lock_type
saved during label_scan indicates a local VG, then the
lockd_vg step is skipped.
Author: David Teigland, 2024-06-12 15:36:45 -05:00
parent 1b9bf5007b
commit bf60cb4da2
6 changed files with 71 additions and 17 deletions
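In caller terms, the change amounts to the pattern below, condensed from the process_each loops changed in tools/toollib.c further down (a sketch only; is_lockd, vg_name, vg_uuid and lockd_state are the names used in those hunks, and error handling is omitted):

    int is_lockd;
    uint32_t lockd_state = 0;

    /* label_scan has already recorded the VG lock_type in lvmcache. */
    is_lockd = lvmcache_vg_is_lockd_type(cmd, vg_name, vg_uuid);

    /* Only shared (sanlock/dlm) VGs need to ask lvmlockd for a lock. */
    if (is_lockd && !lockd_vg(cmd, vg_name, NULL, 0, &lockd_state))
        stack;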

lib/cache/lvmcache.c

@@ -3002,6 +3002,16 @@ int lvmcache_vg_is_foreign(struct cmd_context *cmd, const char *vgname, const ch
    return ret;
}
+int lvmcache_vg_is_lockd_type(struct cmd_context *cmd, const char *vgname, const char *vgid)
+{
+   struct lvmcache_vginfo *vginfo;
+   if ((vginfo = lvmcache_vginfo_from_vgname(vgname, vgid)))
+       return is_lockd_type(vginfo->lock_type);
+   return 0;
+}
/*
 * Example of reading four devs in sequence from the same VG:
 *

lib/cache/lvmcache.h

@@ -179,6 +179,8 @@ void lvmcache_get_max_name_lengths(struct cmd_context *cmd,
int lvmcache_vg_is_foreign(struct cmd_context *cmd, const char *vgname, const char *vgid);
+int lvmcache_vg_is_lockd_type(struct cmd_context *cmd, const char *vgname, const char *vgid);
bool lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const char *vgid);
int lvmcache_vginfo_has_pvid(struct lvmcache_vginfo *vginfo, const char *pvid_arg);

lib/locking/lvmlockd.c

@@ -2014,9 +2014,15 @@ int lockd_global(struct cmd_context *cmd, const char *def_mode)
 * this result is passed into vg_read(). After vg_read() reads the VG,
 * it checks if the VG lock_type (sanlock or dlm) requires a lock to be
 * held, and if so, it verifies that the lock was correctly acquired by
- * looking at lockd_state. If vg_read() sees that the VG is a local VG,
- * i.e. lock_type is not sanlock or dlm, then no lock is required, and it
- * ignores lockd_state (which would indicate no lock was found.)
+ * looking at lockd_state.
+ *
+ * If vg_read() sees that the VG is a local VG, i.e. lock_type is not
+ * sanlock or dlm, then no lock is required, and it ignores lockd_state,
+ * which would indicate no lock was found.  A newer optimization avoids
+ * calling lockd_vg() at all for local VGs by checking the lock_type in
+ * lvmcache saved by label_scan. In the extremely rare case where the
+ * lock_type changes between label_scan and vg_read, the caller goes
+ * back and repeats lockd_vg()+vg_read().
 */
int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
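A condensed sketch of that retry, following the shape of the process_each loops in tools/toollib.c below (control flow only; the vg_read() call here uses a plausible argument list and is not copied from the diff):

do_lockd:
    if (is_lockd && !lockd_vg(cmd, vg_name, NULL, 0, &lockd_state))
        return 0;

    vg = vg_read(cmd, vg_name, vg_uuid, read_flags, lockd_state, &error_flags, &error_vg);

    if (!is_lockd && vg_is_shared(vg)) {
        /* lock_type changed between label_scan and vg_read: take the VG lock and re-read. */
        unlock_and_release_vg(cmd, vg, vg_name);
        is_lockd = 1;
        goto do_lockd;
    }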

tools/lvconvert.c

@@ -5789,10 +5789,12 @@ static int _lvconvert_detach_writecache_when_clean(struct cmd_context *cmd,
    struct logical_volume *lv_fast;
    uint32_t lockd_state, error_flags;
    uint64_t dirty;
+   int is_lockd;
    int ret = 0;
    idl = dm_list_item(dm_list_first(&lr->poll_idls), struct convert_poll_id_list);
    id = idl->id;
+   is_lockd = lvmcache_vg_is_lockd_type(cmd, id->vg_name, NULL);
    /*
     * TODO: we should be able to save info about the dm device for this LV
@@ -5807,7 +5809,7 @@ static int _lvconvert_detach_writecache_when_clean(struct cmd_context *cmd,
    lockd_state = 0;
    error_flags = 0;
-   if (!lockd_vg(cmd, id->vg_name, "ex", 0, &lockd_state)) {
+   if (is_lockd && !lockd_vg(cmd, id->vg_name, "ex", 0, &lockd_state)) {
        log_error("Detaching writecache interrupted - locking VG failed.");
        return 0;
    }
@@ -5844,7 +5846,7 @@ static int _lvconvert_detach_writecache_when_clean(struct cmd_context *cmd,
    if (!lv_writecache_is_clean(cmd, lv, &dirty)) {
        unlock_and_release_vg(cmd, vg, vg->name);
-       if (!lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
+       if (is_lockd && !lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
            stack;
        log_print_unless_silent("Detaching writecache cleaning %llu blocks", (unsigned long long)dirty);
@@ -5897,7 +5899,7 @@ out_release:
    unlock_and_release_vg(cmd, vg, vg->name);
out_lockd:
-   if (!lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
+   if (is_lockd && !lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
        stack;
    return ret;

tools/polldaemon.c

@@ -156,6 +156,7 @@ int wait_for_single_lv(struct cmd_context *cmd, struct poll_operation_id *id,
    int finished = 0;
    uint32_t lockd_state = 0;
    uint32_t error_flags = 0;
+   int is_lockd;
    int ret;
    unsigned wait_before_testing = parms->wait_before_testing;
@@ -171,11 +172,13 @@ int wait_for_single_lv(struct cmd_context *cmd, struct poll_operation_id *id,
        return 0;
    }
+   is_lockd = lvmcache_vg_is_lockd_type(cmd, id->vg_name, NULL);
    /*
     * An ex VG lock is needed because the check can call finish_copy
     * which writes the VG.
     */
-   if (!lockd_vg(cmd, id->vg_name, "ex", 0, &lockd_state)) {
+   if (is_lockd && !lockd_vg(cmd, id->vg_name, "ex", 0, &lockd_state)) {
        log_error("ABORTING: Can't lock VG for %s.", id->display_name);
        return 0;
    }
@@ -229,7 +232,7 @@ int wait_for_single_lv(struct cmd_context *cmd, struct poll_operation_id *id,
        unlock_and_release_vg(cmd, vg, vg->name);
-       if (!lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
+       if (is_lockd && !lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
            stack;
        wait_before_testing = 1;
@@ -240,7 +243,7 @@ int wait_for_single_lv(struct cmd_context *cmd, struct poll_operation_id *id,
out:
    if (vg)
        unlock_and_release_vg(cmd, vg, vg->name);
-   if (!lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
+   if (is_lockd && !lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
        stack;
    return ret;

tools/toollib.c

@@ -2178,6 +2178,7 @@ static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t read_flags,
    int ret;
    int skip;
    int notfound;
+   int is_lockd;
    int process_all = 0;
    int do_report_ret_code = 1;
@@ -2197,6 +2198,7 @@ static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t read_flags,
        vg_uuid = vgnl->vgid;
        skip = 0;
        notfound = 0;
+       is_lockd = lvmcache_vg_is_lockd_type(cmd, vg_name, vg_uuid);
        uuid[0] = '\0';
        if (is_orphan_vg(vg_name)) {
@@ -2214,8 +2216,8 @@ static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t read_flags,
        }
        log_very_verbose("Processing VG %s %s", vg_name, uuid);
-       if (!lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
+do_lockd:
+       if (is_lockd && !lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
            stack;
            ret_max = ECMD_FAILED;
            report_log_ret_code(ret_max);
@@ -2237,6 +2239,14 @@ static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t read_flags,
        if (skip || notfound)
            goto endvg;
+       if (!is_lockd && vg_is_shared(vg)) {
+           /* The lock_type changed since label_scan, won't really occur in practice. */
+           log_debug("Repeat lock and read for local to shared vg");
+           unlock_and_release_vg(cmd, vg, vg_name);
+           is_lockd = 1;
+           goto do_lockd;
+       }
        /* Process this VG? */
        if ((process_all ||
             (!dm_list_empty(arg_vgnames) && str_list_match_item(arg_vgnames, vg_name)) ||
@@ -2257,7 +2267,7 @@ static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t read_flags,
            unlock_vg(cmd, vg, vg_name);
endvg:
        release_vg(vg);
-       if (!lockd_vg(cmd, vg_name, "un", 0, &lockd_state))
+       if (is_lockd && !lockd_vg(cmd, vg_name, "un", 0, &lockd_state))
            stack;
        log_set_report_object_name_and_id(NULL, NULL);
@@ -3876,6 +3886,7 @@ static int _process_lv_vgnameid_list(struct cmd_context *cmd, uint32_t read_flag
    int ret;
    int skip;
    int notfound;
+   int is_lockd;
    int do_report_ret_code = 1;
    log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_VG);
@@ -3885,6 +3896,7 @@ static int _process_lv_vgnameid_list(struct cmd_context *cmd, uint32_t read_flag
        vg_uuid = vgnl->vgid;
        skip = 0;
        notfound = 0;
+       is_lockd = lvmcache_vg_is_lockd_type(cmd, vg_name, vg_uuid);
        uuid[0] = '\0';
        if (vg_uuid && !id_write_format((const struct id*)vg_uuid, uuid, sizeof(uuid)))
@@ -3930,7 +3942,8 @@ static int _process_lv_vgnameid_list(struct cmd_context *cmd, uint32_t read_flag
        log_very_verbose("Processing VG %s %s", vg_name, vg_uuid ? uuid : "");
-       if (!lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
+do_lockd:
+       if (is_lockd && !lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
            ret_max = ECMD_FAILED;
            report_log_ret_code(ret_max);
            continue;
@@ -3951,6 +3964,14 @@ static int _process_lv_vgnameid_list(struct cmd_context *cmd, uint32_t read_flag
        if (skip || notfound)
            goto endvg;
+       if (!is_lockd && vg_is_shared(vg)) {
+           /* The lock_type changed since label_scan, won't really occur in practice. */
+           log_debug("Repeat lock and read for local to shared vg");
+           unlock_and_release_vg(cmd, vg, vg_name);
+           is_lockd = 1;
+           goto do_lockd;
+       }
        ret = process_each_lv_in_vg(cmd, vg, &lvnames, tags_arg, 0,
                                    handle, check_single_lv, process_single_lv);
        if (ret != ECMD_PROCESSED)
@@ -3962,7 +3983,7 @@ static int _process_lv_vgnameid_list(struct cmd_context *cmd, uint32_t read_flag
            unlock_vg(cmd, vg, vg_name);
endvg:
        release_vg(vg);
-       if (!lockd_vg(cmd, vg_name, "un", 0, &lockd_state))
+       if (is_lockd && !lockd_vg(cmd, vg_name, "un", 0, &lockd_state))
            stack;
        log_set_report_object_name_and_id(NULL, NULL);
    }
@@ -4516,6 +4537,7 @@ static int _process_pvs_in_vgs(struct cmd_context *cmd, uint32_t read_flags,
    int ret;
    int skip;
    int notfound;
+   int is_lockd;
    int do_report_ret_code = 1;
    log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_VG);
@@ -4525,6 +4547,7 @@ static int _process_pvs_in_vgs(struct cmd_context *cmd, uint32_t read_flags,
        vg_uuid = vgnl->vgid;
        skip = 0;
        notfound = 0;
+       is_lockd = lvmcache_vg_is_lockd_type(cmd, vg_name, vg_uuid);
        uuid[0] = '\0';
        if (is_orphan_vg(vg_name)) {
@@ -4540,8 +4563,8 @@ static int _process_pvs_in_vgs(struct cmd_context *cmd, uint32_t read_flags,
            ret_max = ECMD_FAILED;
            goto_out;
        }
-       if (!lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
+do_lockd:
+       if (is_lockd && !lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
            ret_max = ECMD_FAILED;
            report_log_ret_code(ret_max);
            continue;
@@ -4564,6 +4587,14 @@ static int _process_pvs_in_vgs(struct cmd_context *cmd, uint32_t read_flags,
        if (notfound)
            goto endvg;
+       if (vg && !is_lockd && vg_is_shared(vg)) {
+           /* The lock_type changed since label_scan, won't really occur in practice. */
+           log_debug("Repeat lock and read for local to shared vg");
+           unlock_and_release_vg(cmd, vg, vg_name);
+           is_lockd = 1;
+           goto do_lockd;
+       }
        /*
         * Don't call "continue" when skip is set, because we need to remove
         * error_vg->pvs entries from devices list.
@@ -4586,7 +4617,7 @@ endvg:
        if (error_vg)
            unlock_and_release_vg(cmd, error_vg, vg_name);
        release_vg(vg);
-       if (!lockd_vg(cmd, vg_name, "un", 0, &lockd_state))
+       if (is_lockd && !lockd_vg(cmd, vg_name, "un", 0, &lockd_state))
            stack;
        /* Quit early when possible. */