mirror of git://sourceware.org/git/lvm2.git
Replace fs_unlock by sync_local_dev_names to notify local clvmd. (2.02.80)
Introduce sync_local_dev_names and CLVMD_CMD_SYNC_NAMES to issue fs_unlock.
commit a8de276520
parent fd1aaee33f
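
For orientation, below is a small self-contained sketch of the pattern this commit introduces: instead of calling fs_unlock() directly, callers use a sync_local_dev_names() macro that routes a pseudo lock resource ("#sync_names") through the generic locking entry point, so the no-lock and file-lock backends call fs_unlock() locally while the cluster backend forwards CLVMD_CMD_SYNC_NAMES to the local clvmd. This is an illustrative stand-in, not LVM2 source; the *_stub names and the LCK_LOCAL value used here are invented for the example.

/* Illustrative stand-in only (not LVM2 code): a pseudo resource name
 * dispatched through one lock entry point, mirroring the diff below. */
#include <stdio.h>
#include <string.h>

#define VG_SYNC_NAMES "#sync_names"
#define LCK_LOCAL 0x40                 /* flag value invented for this sketch */

static void fs_unlock_stub(void)
{
        /* In LVM2, fs_unlock() waits for udev to finish processing
         * device nodes (dm_udev_wait() on the fs cookie). */
        printf("fs_unlock: device names synced\n");
}

static int lock_vol_stub(const char *resource, unsigned flags)
{
        /* Each locking backend checks for the pseudo VG name before
         * doing any real locking. */
        if (!strcmp(resource, VG_SYNC_NAMES)) {
                if (flags & LCK_LOCAL)
                        fs_unlock_stub();   /* local node only, no cluster round trip */
                return 1;
        }
        /* ... real lock handling would go here ... */
        return 1;
}

/* Mirrors the new sync_local_dev_names() macro: callers stop touching
 * fs_unlock() directly and go through the locking layer instead. */
#define sync_local_dev_names() lock_vol_stub(VG_SYNC_NAMES, LCK_LOCAL)

int main(void)
{
        sync_local_dev_names();   /* e.g. just before fork()/exec of an external command */
        return 0;
}
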
@@ -1,5 +1,7 @@
 Version 2.02.81 -
 ===================================
+  Replace fs_unlock by sync_local_dev_names to notify local clvmd. (2.02.80)
+  Introduce sync_local_dev_names and CLVMD_CMD_SYNC_NAMES to issue fs_unlock.
   Accept fusion fio in device type filter.
   Add disk to mirrored log type conversion.
@@ -71,4 +71,5 @@ static const char CLVMD_SOCKNAME[]= DEFAULT_RUN_DIR "/clvmd.sock";
 #define CLVMD_CMD_SET_DEBUG 42
 #define CLVMD_CMD_VG_BACKUP 43
 #define CLVMD_CMD_RESTART 44
+#define CLVMD_CMD_SYNC_NAMES 45
 #endif
@@ -139,6 +139,10 @@ int do_command(struct local_client *client, struct clvm_header *msg, int msglen,
                do_refresh_cache();
                break;

+       case CLVMD_CMD_SYNC_NAMES:
+               lvm_do_fs_unlock();
+               break;
+
        case CLVMD_CMD_SET_DEBUG:
                debug = args[0];
                break;
@@ -275,6 +279,7 @@ int do_pre_command(struct local_client *client)
        case CLVMD_CMD_GET_CLUSTERNAME:
        case CLVMD_CMD_SET_DEBUG:
        case CLVMD_CMD_VG_BACKUP:
+       case CLVMD_CMD_SYNC_NAMES:
        case CLVMD_CMD_LOCK_QUERY:
        case CLVMD_CMD_RESTART:
                break;
@@ -307,6 +312,7 @@ int do_post_command(struct local_client *client)

        case CLVMD_CMD_LOCK_VG:
        case CLVMD_CMD_VG_BACKUP:
+       case CLVMD_CMD_SYNC_NAMES:
        case CLVMD_CMD_LOCK_QUERY:
                /* Nothing to do here */
                break;
@@ -897,6 +897,7 @@ struct dm_hash_node *get_next_excl_lock(struct dm_hash_node *v, char **name)
 void lvm_do_fs_unlock(void)
 {
        pthread_mutex_lock(&lvm_lock);
+       DEBUGLOG("Syncing device names\n");
        fs_unlock();
        pthread_mutex_unlock(&lvm_lock);
 }
@@ -403,6 +403,7 @@ int fs_rename_lv(struct logical_volume *lv, const char *dev,
 void fs_unlock(void)
 {
        if (!memlock()) {
+               log_debug("Syncing device names");
                /* Wait for all processed udev devices */
                if (!dm_udev_wait(_fs_cookie))
                        stack;
@@ -345,14 +345,15 @@ static int _lock_for_cluster(struct cmd_context *cmd, unsigned char clvmd_cmd,
         * locks are cluster-wide.
         * Also, if the lock is exclusive it makes no sense to try to
         * acquire it on all nodes, so just do that on the local node too.
-        * One exception, is that P_ locks /do/ get distributed across
-        * the cluster because they might have side-effects.
+        * One exception, is that P_ locks (except VG_SYNC_NAMES) /do/ get
+        * distributed across the cluster because they might have side-effects.
         */
-       if (strncmp(name, "P_", 2) &&
-           (clvmd_cmd == CLVMD_CMD_LOCK_VG ||
-            (flags & LCK_TYPE_MASK) == LCK_EXCL ||
-            (flags & LCK_LOCAL) ||
-            !(flags & LCK_CLUSTER_VG)))
+       if ((strncmp(name, "P_", 2) &&
+            (clvmd_cmd == CLVMD_CMD_LOCK_VG ||
+             (flags & LCK_TYPE_MASK) == LCK_EXCL ||
+             (flags & LCK_LOCAL) ||
+             !(flags & LCK_CLUSTER_VG))) ||
+           (clvmd_cmd == CLVMD_CMD_SYNC_NAMES && (flags & LCK_LOCAL)))
                node = ".";

        status = _cluster_request(clvmd_cmd, node, args, len,
@@ -401,6 +402,11 @@ int lock_resource(struct cmd_context *cmd, const char *resource, uint32_t flags)

        switch (flags & LCK_SCOPE_MASK) {
        case LCK_VG:
+               if (!strcmp(resource, VG_SYNC_NAMES)) {
+                       log_very_verbose("Requesting sync names.");
+                       return _lock_for_cluster(cmd, CLVMD_CMD_SYNC_NAMES,
+                                                flags & ~LCK_HOLD, resource);
+               }
                if (flags == LCK_VG_BACKUP) {
                        log_very_verbose("Requesting backup of VG metadata for %s",
                                         resource);
@@ -265,6 +265,9 @@ static int _file_lock_resource(struct cmd_context *cmd, const char *resource,
                if (strcmp(resource, VG_GLOBAL))
                        lvmcache_drop_metadata(resource, 0);

+               if (!strcmp(resource, VG_SYNC_NAMES))
+                       fs_unlock();
+
                /* LCK_CACHE does not require a real lock */
                if (flags & LCK_CACHE)
                        break;
@@ -325,7 +325,7 @@ int check_lvm1_vg_inactive(struct cmd_context *cmd, const char *vgname)
        char path[PATH_MAX];

        /* We'll allow operations on orphans */
-       if (is_orphan_vg(vgname) || is_global_vg(vgname))
+       if (!is_real_vg(vgname))
                return 1;

        /* LVM1 is only present in 2.4 kernels. */
@@ -109,6 +109,7 @@ int check_lvm1_vg_inactive(struct cmd_context *cmd, const char *vgname);
  */
 #define VG_ORPHANS     "#orphans"
 #define VG_GLOBAL      "#global"
+#define VG_SYNC_NAMES  "#sync_names"

 /*
  * Common combinations
@@ -169,6 +170,8 @@ int check_lvm1_vg_inactive(struct cmd_context *cmd, const char *vgname);
        lock_vol((vg)->cmd, (vg)->name, LCK_VG_REVERT)
 #define remote_backup_metadata(vg)     \
        lock_vol((vg)->cmd, (vg)->name, LCK_VG_BACKUP)
+#define sync_local_dev_names(cmd)      \
+       lock_vol(cmd, VG_SYNC_NAMES, LCK_NONE | LCK_CACHE | LCK_LOCAL)

 /* Process list of LVs */
 int suspend_lvs(struct cmd_context *cmd, struct dm_list *lvs);
@@ -38,6 +38,8 @@ static int _no_lock_resource(struct cmd_context *cmd, const char *resource,
 {
        switch (flags & LCK_SCOPE_MASK) {
        case LCK_VG:
+               if (!strcmp(resource, VG_SYNC_NAMES))
+                       fs_unlock();
                break;
        case LCK_LV:
                switch (flags & LCK_TYPE_MASK) {
@@ -3021,7 +3021,7 @@ int set_lv(struct cmd_context *cmd, struct logical_volume *lv,
                return 0;
        }

-       fs_unlock(); /* Wait until devices are available */
+       sync_local_dev_names(cmd); /* Wait until devices are available */

        log_verbose("Clearing start of logical volume \"%s\"", lv->name);

@@ -362,6 +362,7 @@ int move_pvs_used_by_lv(struct volume_group *vg_from,
                        const char *lv_name);
 int is_global_vg(const char *vg_name);
 int is_orphan_vg(const char *vg_name);
+int is_real_vg(const char *vg_name);
 int vg_missing_pv_count(const struct volume_group *vg);
 int vgs_are_compatible(struct cmd_context *cmd,
                       struct volume_group *vg_from,
@@ -3560,6 +3560,14 @@ int is_orphan_vg(const char *vg_name)
        return (vg_name && !strncmp(vg_name, ORPHAN_PREFIX, sizeof(ORPHAN_PREFIX) - 1)) ? 1 : 0;
 }

+/*
+ * Exclude pseudo VG names used for locking.
+ */
+int is_real_vg(const char *vg_name)
+{
+       return (vg_name && *vg_name != '#');
+}
+
 /*
  * Returns:
  * 0 - fail
@@ -55,7 +55,7 @@ int exec_cmd(struct cmd_context *cmd, const char *const argv[], int *rstatus)

        log_verbose("Executing: %s", _verbose_args(argv, buf, sizeof(buf)));

-       fs_unlock(); /* Flush oops and ensure cookie is not shared */
+       sync_local_dev_names(cmd); /* Flush ops and reset dm cookie */

        if ((pid = fork()) == -1) {
                log_error("fork failed: %s", strerror(errno));
@@ -507,7 +507,7 @@ static int _add_dev_node(const char *dev_name, uint32_t major, uint32_t minor,
        (void) dm_prepare_selinux_context(path, S_IFBLK);
        old_mask = umask(0);
        if (mknod(path, S_IFBLK | mode, dev) < 0) {
-               log_error("Unable to make device node for '%s'", dev_name);
+               log_error("%s: mknod for %s failed: %s", path, dev_name, strerror(errno));
                umask(old_mask);
                (void) dm_prepare_selinux_context(NULL, 0);
                return 0;
@@ -42,7 +42,7 @@ static int _become_daemon(struct cmd_context *cmd)

        sigaction(SIGCHLD, &act, NULL);

-       fs_unlock(); /* Flush oops and ensure cookie is not shared */
+       sync_local_dev_names(cmd); /* Flush ops and reset dm cookie */

        if ((pid = fork()) == -1) {
                log_error("fork failed: %s", strerror(errno));